Mirror of https://github.com/tendermint/tendermint.git
Synced 2026-01-13 16:22:53 +00:00

Compare commits: tessr/f ... e2e-avoid- (545 commits)
*Commit listing: 545 commits, b87431cef7 … 66ba12d9bc.*
.github/CODEOWNERS (vendored, 25 changes)

@@ -1,27 +1,10 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/

# Everything goes through the following "global owners" by default.
# Unless a later match takes precedence, these three will be
# requested for review when someone opens a PR.
# Note that the last matching pattern takes precedence, so
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @erikgrinaker @melekes @tessr

# Overrides for tooling packages
.circleci/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
.github/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
DOCKER/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr

# Overrides for core Tendermint packages
abci/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
evidence/ @cmwaters @ebuchman @erikgrinaker @melekes @tessr
light/ @cmwaters @melekes @ebuchman

# Overrides for docs
*.md @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
docs/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr

* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair
.github/PULL_REQUEST_TEMPLATE.md (vendored, 8 changes)

@@ -1,7 +1,7 @@
## Description

Please add a description of the changes that this PR introduces and the files that
are the most critical to review.

_Please add a description of the changes that this PR introduces and the files that
are the most critical to review._
If this PR fixes an open Issue, please include "Closes #XXX" (where "XXX" is the Issue number)
so that GitHub will automatically close the Issue when this PR is merged.

Closes: #XXX
.github/dependabot.yml (vendored, 1 change)

@@ -23,6 +23,5 @@ updates:
reviewers:
- melekes
- tessr
- erikgrinaker
labels:
- T:dependencies
.github/mergify.yml (vendored, 8 changes)

@@ -8,3 +8,11 @@ pull_request_rules:
method: squash
strict: true
commit_message: title+body
- name: backport patches to v0.34.x branch
conditions:
- base=master
- label=S:backport-to-v0.34.x
actions:
backport:
branches:
- v0.34.x
.github/workflows/coverage.yml (vendored, 22 changes)

@@ -10,7 +10,7 @@ jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v2.3.4
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
@@ -44,9 +44,9 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
@@ -66,9 +66,9 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
@@ -81,7 +81,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.15
go-version: 1.16
- name: test & coverage report creation
run: |
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
@@ -95,8 +95,8 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
@@ -121,7 +121,7 @@ jobs:
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v1.2.1
- uses: codecov/codecov-action@v2.0.2
with:
file: ./coverage.txt
if: env.GIT_DIFF
.github/workflows/docker.yml (vendored, 24 changes)

@@ -14,10 +14,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@master
- uses: actions/checkout@v2.3.4
- name: Prepare
id: prep
run: |
@@ -37,23 +34,26 @@ jobs:
fi
echo ::set-output name=tags::${TAGS}

- name: Set up QEMU
uses: docker/setup-qemu-action@master
with:
platforms: all

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: docker/setup-buildx-action@v1.5.0

- name: Login to DockerHub
uses: docker/login-action@v1
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.10.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Build Tendermint
run: |
make build-linux && cp build/tendermint DOCKER/tendermint

- name: Publish to Docker Hub
uses: docker/build-push-action@v2
uses: docker/build-push-action@v2.7.0
with:
context: ./DOCKER
context: .
file: ./DOCKER/Dockerfile
platforms: linux/amd64,linux/arm64
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
.github/workflows/docs.yml (vendored, 31 changes)

@@ -1,31 +0,0 @@
name: Documentation
# This job builds and deploys documenation to github pages.
# It runs on every push to master.
on:
push:
branches:
- master

jobs:
build-and-deploy:
runs-on: ubuntu-latest
container:
image: tendermintdev/docker-website-deployment
steps:
- name: Checkout 🛎️
uses: actions/checkout@v2.3.1
with:
persist-credentials: false
fetch-depth: 0

- name: Install and Build 🔧
run: |
apk add rsync
make build-docs

- name: Deploy 🚀
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: ~/output
@@ -1,7 +1,12 @@
# Runs randomly generated E2E testnets nightly.
name: e2e-nightly
# Runs randomly generated E2E testnets nightly
# on the 0.34.x release branch

# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!

name: e2e-nightly-34x
on:
workflow_dispatch: # allow running workflow manually
workflow_dispatch: # allow running workflow manually, in theory
schedule:
- cron: '0 2 * * *'
@@ -18,9 +23,11 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.15'
go-version: '1.16'

- uses: actions/checkout@v2
- uses: actions/checkout@v2.3.4
with:
ref: 'v0.34.x'

- name: Build
working-directory: test/e2e
@@ -42,12 +49,28 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed
SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
SLACK_FOOTER: ''
.github/workflows/e2e-nightly-master.yml (vendored, new file, 74 lines)

@@ -0,0 +1,74 @@
# Runs randomly generated E2E testnets nightly on master

# !! If you change something in this file, you probably want
# to update the e2e-nightly-34x workflow as well!

name: e2e-nightly-master
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 2 * * *'

jobs:
e2e-nightly-test-2:
# Run parallel jobs for the listed testnet groups (must match the
# ./build/generator -g flag)
strategy:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'

- uses: actions/checkout@v2.3.4

- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner

- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}

- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml

e2e-nightly-fail-2:
needs: e2e-nightly-test-2
if: ${{ failure() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':skull:'
SLACK_COLOR: danger
SLACK_MESSAGE: Nightly E2E tests failed on master
SLACK_FOOTER: ''

e2e-nightly-success: # may turn this off once they seem to pass consistently
needs: e2e-nightly-test-2
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly E2E Tests
SLACK_ICON_EMOJI: ':white_check_mark:'
SLACK_COLOR: good
SLACK_MESSAGE: Nightly E2E tests passed on master
SLACK_FOOTER: ''
.github/workflows/e2e.yml (vendored, 7 changes)

@@ -2,6 +2,7 @@ name: e2e
# Runs the CI end-to-end test network on all pushes to master or release branches
# and every pull request, but only if any Go files have been changed.
on:
workflow_dispatch: # allow running workflow manually
pull_request:
push:
branches:
@@ -15,9 +16,9 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.15'
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
go-version: '1.16'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
.github/workflows/fuzz-nightly.yml (vendored, new file, 92 lines)

@@ -0,0 +1,92 @@
# Runs fuzzing nightly.
name: fuzz-nightly
on:
workflow_dispatch: # allow running workflow manually
schedule:
- cron: '0 3 * * *'
pull_request:
branches: [master]
paths:
- "test/fuzz/**/*.go"

jobs:
fuzz-nightly-test:
runs-on: ubuntu-latest
steps:
- uses: actions/setup-go@v2
with:
go-version: '1.16'

- uses: actions/checkout@v2.3.4

- name: Install go-fuzz
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build

- name: Fuzz mempool-v1
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
continue-on-error: true

- name: Fuzz mempool-v0
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
continue-on-error: true

- name: Fuzz p2p-addrbook
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook
continue-on-error: true

- name: Fuzz p2p-pex
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex
continue-on-error: true

- name: Fuzz p2p-sc
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-sc
continue-on-error: true

- name: Fuzz p2p-rpc-server
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-rpc-server
continue-on-error: true

- name: Archive crashers
uses: actions/upload-artifact@v2
with:
name: crashers
path: test/fuzz/**/crashers
retention-days: 3

- name: Archive suppressions
uses: actions/upload-artifact@v2
with:
name: suppressions
path: test/fuzz/**/suppressions
retention-days: 3

- name: Set crashers count
working-directory: test/fuzz
run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
id: set-crashers-count

outputs:
crashers-count: ${{ steps.set-crashers-count.outputs.count }}

fuzz-nightly-fail:
needs: fuzz-nightly-test
if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack if any crashers
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
SLACK_USERNAME: Nightly Fuzz Tests
SLACK_ICON_EMOJI: ':firecracker:'
SLACK_COLOR: danger
SLACK_MESSAGE: Crashers found in Nightly Fuzz tests
SLACK_FOOTER: ''
.github/workflows/janitor.yml (vendored, new file, 16 lines)

@@ -0,0 +1,16 @@
name: Janitor
# Janitor cleans up previous runs of various workflows
# To add more workflows to cancel visit https://api.github.com/repos/tendermint/tendermint/actions/workflows and find the actions name
on:
pull_request:

jobs:
cancel:
name: "Cancel Previous Runs"
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}
.github/workflows/jepsen.yml (vendored, new file, 65 lines)

@@ -0,0 +1,65 @@
# Runs a Jepsen test - cas-register (no nemesis) by default.
# See inputs for various options.
# Repo: https://github.com/tendermint/jepsen
#
# If you want to test a new breaking version of Tendermint, you'll need to
# update the Merkleeyes ABCI app and 'merkleeyesUrl' input accordingly. You can
# upload a new tarball to
# https://github.com/tendermint/jepsen/releases/tag/0.2.1.
#
# Manually triggered.
name: jepsen
on:
workflow_dispatch:
inputs:
workload:
description: 'Test workload to run: (cas-register | set)'
required: true
default: 'cas-register'
nemesis:
description: 'Nemesis to use: (none | clocks | single-partitions | half-partitions | ring-partitions | split-dup-validators | peekaboo-dup-validators | changing-validators | crash | truncate-tendermint | truncate-merkleeyes)'
required: true
default: 'none'
dupOrSuperByzValidators:
description: '"--dup-validators" (multiple validators share the same key) and(or) "--super-byzantine-validators" (byzantine validators have just shy of 2/3 the voting weight)'
required: false
default: ''
concurrency:
description: 'How many workers should we run? Must be an integer and >= 10, optionally followed by n (e.g. 3n) to multiply by the number of nodes.'
required: true
default: 10
timeLimit:
description: 'Excluding setup and teardown, how long should a test run for, in seconds?'
required: true
default: 60
tendermintUrl:
description: 'Where to grab the Tendermint tarball (w/ linux/amd64 binary)'
required: true
default: 'https://github.com/melekes/katas/releases/download/0.2.0/tendermint.tar.gz'
merkleeyesUrl:
description: 'Where to grab the Merkleeyes tarball (w/ linux/amd64 binary)'
required: true
default: 'https://github.com/tendermint/jepsen/releases/download/0.2.1/merkleeyes_0.1.7.tar.gz'

jobs:
jepsen-test:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.3.4
with:
repository: 'tendermint/jepsen'

- name: Start a Jepsen cluster in background
working-directory: docker
run: ./bin/up --daemon

- name: Run the test
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'

- name: Archive results
uses: actions/upload-artifact@v2
with:
name: results
path: tendermint/store/latest
retention-days: 3
.github/workflows/linkchecker.yml (vendored, 4 changes)

@@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.11
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
folder-path: "docs"
@@ -13,17 +13,17 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v2.3.0
- uses: golangci/golangci-lint-action@v2.5.2
with:
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.31
version: v1.38
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF
.github/workflows/linter.yml (vendored, 5 changes)

@@ -11,6 +11,7 @@ on:
branches: [master]
paths:
- "**.md"
- "**.yml"

jobs:
build:
@@ -18,7 +19,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2
uses: actions/checkout@v2.3.4
- name: Lint Code Base
uses: docker://github/super-linter:v3
env:
@@ -27,5 +28,5 @@ jobs:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_MD: true
VALIDATE_OPAENAPI: true
VALIDATE_OPENAPI: true
VALIDATE_YAML: true
.github/workflows/proto-docker.yml (vendored, new file, 51 lines)

@@ -0,0 +1,51 @@
name: Build & Push TM Proto Builder
on:
pull_request:
paths:
- "tools/proto/*"
push:
branches:
- master
paths:
- "tools/proto/*"
schedule:
# run this job once a month to recieve any go or buf updates
- cron: "* * 1 * *"

jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=tendermintdev/docker-build-proto
VERSION=noop
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
echo ::set-output name=tags::${TAGS}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.5.0

- name: Login to DockerHub
uses: docker/login-action@v1.10.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
.github/workflows/proto.yml (vendored, 5 changes)

@@ -2,6 +2,7 @@ name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "**.proto"
@@ -10,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v2.3.4
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@master
- uses: actions/checkout@v2.3.4
- name: check-breakage
run: make proto-check-breaking-ci
.github/workflows/release.yml (vendored, 4 changes)

@@ -12,13 +12,13 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
uses: actions/checkout@v2.3.4
with:
fetch-depth: 0

- uses: actions/setup-go@v2
with:
go-version: '1.15'
go-version: '1.16'

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
if: startsWith(github.ref, 'refs/tags/')
.github/workflows/stale.yml (vendored, 8 changes)

@@ -7,12 +7,14 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v3
- uses: actions/stale@v4
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
days-before-stale: 10
days-before-close: 4
days-before-stale: -1
days-before-close: -1
days-before-pr-stale: 10
days-before-pr-close: 4
exempt-pr-labels: "S:wip"
.github/workflows/tests.yml (vendored, 70 changes)

@@ -10,14 +10,6 @@ on:
- release/**

jobs:
cleanup-runs:
runs-on: ubuntu-latest
steps:
- uses: rokroskar/workflow-run-cleanup-action@master
env:
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"

build:
name: Build
runs-on: ubuntu-latest
@@ -25,9 +17,9 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
@@ -36,7 +28,7 @@ jobs:
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -44,44 +36,12 @@ jobs:
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF

test_abci_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.3
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.3
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_abci_apps
run: abci/tests/test_app/test.sh
shell: bash
if: env.GIT_DIFF

test_abci_cli:
runs-on: ubuntu-latest
needs: build
@@ -89,22 +49,22 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -120,22 +80,22 @@ jobs:
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.15"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.3
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
.gitignore (vendored, 11 changes)

@@ -15,7 +15,7 @@
.vagrant
.vendor-new/
.vscode/
abci-cli
abci/abci-cli
addrbook.json
artifacts/*
build/*
@@ -24,6 +24,7 @@ docs/.vuepress/dist
docs/_build
docs/dist
docs/node_modules/
docs/spec
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out
@@ -35,9 +36,13 @@ shunit2
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
test/app/grpc_client
test/e2e/build
test/e2e/networks/*/
test/logs
test/maverick/maverick
test/p2p/data/
vendor
test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
@@ -39,6 +39,7 @@ linters:
# - wsl
# - gocognit
- nolintlint
- asciicheck

issues:
exclude-rules:
@@ -53,9 +54,9 @@ issues:
linters-settings:
dogsled:
max-blank-identifiers: 3
maligned:
suggest-new: true
# govet:
# check-shadowing: true
golint:
min-confidence: 0
maligned:
suggest-new: true
misspell:
locale: US
@@ -1,4 +1,4 @@
project_name: Tendermint
project_name: tendermint

env:
# Require use of Go modules.
CHANGELOG.md (284 changes)

@@ -1,5 +1,194 @@
# Changelog

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

## v0.34.12

*August 17, 2021*

Special thanks to external contributors on this release: @JayT106.

### FEATURES

- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
  `/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
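A minimal sketch (not part of this changelog) of fetching the first chunk from the new endpoint over plain HTTP; the node address and the `chunk` query parameter name are assumptions based on the entry above:

```go
// Fetch chunk 0 of the genesis file from a node's RPC endpoint.
// Assumes a locally running node on the default RPC port.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	resp, err := http.Get("http://localhost:26657/genesis_chunked?chunk=0")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body) // JSON-RPC envelope containing the chunk
	fmt.Println(string(body))
}
```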

### IMPROVEMENTS

- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)

### BUG FIXES

- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
  with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)

## v0.34.11

*June 18, 2021*

This release improves the robustness of statesync; tweaking channel priorities and timeouts and
adding two new parameters to the state sync config.

### BREAKING CHANGES

- Apps
  - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.

### IMPROVEMENTS

- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)

### BUG FIXES

- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)

## v0.34.10

*April 14, 2021*

This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!

- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)

## v0.34.9

*April 8, 2021*

This release fixes a moderate severity security issue, Security Advisory Alderfly,
which impacts all networks that rely on Tendermint light clients.
Further details will be released once networks have upgraded.

This release also includes a small Go API-breaking change, to reduce panics in the RPC layer.

Special thanks to our external contributors on this release: @gchaincl

### BREAKING CHANGES

- Go API
  - [rpc/jsonrpc/server] [\#6204](https://github.com/tendermint/tendermint/issues/6204) Modify `WriteRPCResponseHTTP(Error)` to return an error (@melekes)

### FEATURES

- [rpc] [\#6226](https://github.com/tendermint/tendermint/issues/6226) Index block events and expose a new RPC method, `/block_search`, to allow querying for blocks by `BeginBlock` and `EndBlock` events (@alexanderbez)
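A minimal sketch (not part of this changelog) of calling the new endpoint over plain HTTP; the node address, the example query, and the parameter set shown here are assumptions based on the entry above:

```go
// Query /block_search for blocks matching an indexed-event query.
// Assumes a locally running node on the default RPC port.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("query", `"block.height > 100"`) // illustrative query string
	q.Set("page", "1")
	q.Set("per_page", "10")
	q.Set("order_by", `"desc"`)

	resp, err := http.Get("http://localhost:26657/block_search?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body) // JSON-RPC response listing matching blocks
	fmt.Println(string(body))
}
```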

### BUG FIXES

- [rpc/jsonrpc/server] [\#6191](https://github.com/tendermint/tendermint/issues/6191) Correctly unmarshal `RPCRequest` when data is `null` (@melekes)
- [p2p] [\#6289](https://github.com/tendermint/tendermint/issues/6289) Fix "unknown channels" bug on CustomReactors (@gchaincl)
- [light/evidence] Adds logic to handle forward lunatic attacks (@cmwaters)

## v0.34.8

*February 25, 2021*

This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
introduces changes that should mean the logs are much, much quieter. 🎉

### IMPROVEMENTS

- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)

### BUG FIXES

- [abci] [\#6124](https://github.com/tendermint/tendermint/issues/6124) Fixes a panic condition during callback execution in `ReCheckTx` during high tx load. (@alexanderbez)

## v0.34.7

*February 18, 2021*

This release fixes a downstream security issue which impacts Cosmos SDK
users who are:

* Using Cosmos SDK v0.40.0 or later, AND
* Running validator nodes, AND
* Using the file-based `FilePV` implementation for their consensus keys

Users who fulfill all the above criteria were susceptible to leaking
private key material in the logs. All other users are unaffected.

The root cause was a discrepancy
between the Tendermint Core (untyped) logger and the Cosmos SDK (typed) logger:
Tendermint Core's logger automatically stringifies Go interfaces whenever possible;
however, the Cosmos SDK's logger uses reflection to log the fields within a Go interface.

The introduction of the typed logger meant that previously un-logged fields within
interfaces are now sometimes logged, including the private key material inside the
`FilePV` struct.

Tendermint Core v0.34.7 fixes this issue; however, we strongly recommend that all validators
use remote signer implementations instead of `FilePV` in production.
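To make the discrepancy concrete, here is an illustrative sketch (not code from either project, and the struct below is hypothetical): a stringifying logger only sees the redacted `String()` form, while a reflection-based structured logger serializes every exported field, including the key:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Hypothetical stand-in for a file-based signer with sensitive state.
type filePV struct {
	Address string `json:"address"`
	PrivKey string `json:"priv_key"` // sensitive
}

// String is what a stringifying logger would use; it deliberately omits the key.
func (pv filePV) String() string { return fmt.Sprintf("FilePV{%s}", pv.Address) }

func main() {
	pv := filePV{Address: "A1B2C3", PrivKey: "SECRET"}

	// Untyped, stringifying logger: only the redacted form reaches the log.
	fmt.Println("signer:", pv)

	// Typed, reflection-based logger (modelled here with JSON marshalling):
	// every exported field, including the private key, ends up in the log line.
	b, _ := json.Marshal(pv)
	fmt.Println("signer:", string(b))
}
```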

Thank you to @joe-bowman for his assistance with this vulnerability and a particular
shout-out to @marbar3778 for diagnosing it quickly.

### BUG FIXES

- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)

## v0.34.6

*February 18, 2021*

_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._

## v0.34.4

*February 11, 2021*

This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!

Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!

### BUG FIXES

- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)

## v0.34.3

*January 19, 2021*

This release includes a fix for a high-severity security vulnerability,
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.

Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.

### BUG FIXES

- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)
- [proto] [\#5886](https://github.com/tendermint/tendermint/pull/5889) Bump gogoproto to 1.3.2 (@marbar3778)

## v0.34.2

*January 12, 2021*

This release fixes a substantial bug in evidence handling where evidence could
sometimes be broadcast before the block containing that evidence was fully committed,
resulting in some nodes panicking when trying to verify said evidence.

### BREAKING CHANGES

- Go API
  - [libs/os] [\#5871](https://github.com/tendermint/tendermint/issues/5871) `EnsureDir` now propagates IO errors and checks the file type (@erikgrinaker)

### BUG FIXES

- [evidence] [\#5890](https://github.com/tendermint/tendermint/pull/5890) Add a buffer to evidence from consensus to avoid broadcasting and proposing evidence before the
  height of such an evidence has finished (@cmwaters)
- [statesync] [\#5889](https://github.com/tendermint/tendermint/issues/5889) Set `LastHeightConsensusParamsChanged` when bootstrapping Tendermint state (@cmwaters)

## v0.34.1

*January 6, 2021*

@@ -12,8 +201,6 @@ disconnecting from this node. As a temporary remedy (until the mempool package
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
one by one without batching.

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -42,8 +229,6 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
@joe-bowman, @cuonglm, @SadPencil and @dongsam.

And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -70,14 +255,14 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
  - `MaxBatchBytes` new config setting defines the max size of one batch.
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)

- Blockchain Protocol
  - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
  - [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supercedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
  - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
  - [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)

@@ -136,7 +321,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)

@@ -174,7 +359,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)

@@ -284,9 +469,6 @@ as 2/3+ of the signatures are checked._

Special thanks to @njmurarka at Bluzelle Networks for reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)

@@ -302,8 +484,6 @@ need to update your code.**

Special thanks to external contributors on this release: @tau3,

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -363,8 +543,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

Special thanks to external contributors on this release: @whylee259, @greg-szabo

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -451,9 +629,6 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)

@@ -466,8 +641,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release:
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -518,9 +691,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
Special thanks to external contributors on this release:
@princesinha19

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing to change default ordering from asc to desc (@princesinha19)

@@ -539,9 +709,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon

Friendly reminder, we have a [bug bounty
program.](https://hackerone.com/tendermint).

*January 14, 2020*

This release contains breaking changes to the `Block#Header`, specifically

@@ -770,9 +937,6 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
|
||||
@@ -784,9 +948,6 @@ _January, 9, 2020_
|
||||
|
||||
Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)
|
||||
@@ -808,9 +969,6 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program.](https://hackerone.com/tendermint).
|
||||
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -850,9 +1008,6 @@ identified and fixed here.
|
||||
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
|
||||
and reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -879,9 +1034,6 @@ accepting new peers and only allowing `ed25519` pubkeys.
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
|
||||
this out.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting
|
||||
@@ -897,9 +1049,6 @@ All clients are recommended to upgrade. See
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
|
||||
and reporting this issue.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer
|
||||
@@ -910,9 +1059,6 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- CLI/RPC/Config
|
||||
@@ -948,9 +1094,6 @@ guide.
|
||||
Special thanks to external contributors on this release:
|
||||
@gchaincl, @bluele, @climber73
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)
|
||||
@@ -971,9 +1114,6 @@ program](https://hackerone.com/tendermint).
|
||||
Special thanks to external contributors on this release:
|
||||
@ruseinov, @bluele, @guagualvcha
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -1013,9 +1153,6 @@ This release contains a minor enhancement to the ABCI and some breaking changes
|
||||
- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
|
||||
- Removed various functions from `libs` pkgs
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -1061,9 +1198,6 @@ and the RPC, namely:
|
||||
[docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
|
||||
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
@@ -1164,8 +1298,6 @@ Notes:
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
|
||||
and reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
@@ -1186,8 +1318,6 @@ identified and fixed here.
|
||||
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
|
||||
and reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -1215,8 +1345,6 @@ accepting new peers and only allowing `ed25519` pubkeys.
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
|
||||
this out.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
@@ -1233,8 +1361,6 @@ All clients are recommended to upgrade. See
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
|
||||
and reporting this issue.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
@@ -1530,8 +1656,6 @@ See the [v0.31.0
|
||||
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
|
||||
more details.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -1752,8 +1876,6 @@ This release contains two important fixes: one for p2p layer where we sometimes
|
||||
were not closing connections and one for consensus layer where consensus with
|
||||
no empty blocks (`create_empty_blocks = false`) could halt.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### IMPROVEMENTS:
|
||||
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once
|
||||
@@ -1793,8 +1915,6 @@ While we are trying to stabilize the Block protocol to preserve compatibility
|
||||
with old chains, there may be some final changes yet to come before Cosmos
|
||||
launch as we continue to audit and test the software.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -1843,8 +1963,6 @@ program](https://hackerone.com/tendermint).
|
||||
Special thanks to external contributors on this release:
|
||||
@HaoyangLiu
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BUG FIXES:
|
||||
- [consensus] Fix consensus halt from proposing blocks with too much evidence
|
||||
@@ -1898,7 +2016,7 @@ See [UPGRADING.md](UPGRADING.md) for more details.
|
||||
|
||||
- [build] [\#3085](https://github.com/tendermint/tendermint/issues/3085) Fix `Version` field in build scripts (@husio)
|
||||
- [crypto/multisig] [\#3102](https://github.com/tendermint/tendermint/issues/3102) Fix multisig keys address length
|
||||
- [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshaling into `crypto.PubKey` interface
|
||||
- [p2p/conn] [\#3111](https://github.com/tendermint/tendermint/issues/3111) Make SecretConnection thread safe
|
||||
- [rpc] [\#3053](https://github.com/tendermint/tendermint/issues/3053) Fix internal error in `/tx_search` when results are empty
|
||||
(@gianfelipe93)
|
||||
@@ -1973,8 +2091,6 @@ Special thanks to @dlguddus for discovering a [major
|
||||
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
|
||||
in the proposer selection algorithm.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
This release is primarily about fixes to the proposer selection algorithm
|
||||
in preparation for the [Cosmos Game of
|
||||
@@ -2037,8 +2153,6 @@ Special thanks to external contributors on this release:
|
||||
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
|
||||
@nagarajmanjunath, @tomtau
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### FEATURES:
|
||||
|
||||
@@ -2078,8 +2192,6 @@ program](https://hackerone.com/tendermint).
|
||||
Special thanks to external contributors on this release:
|
||||
@danil-lashin, @kevlubkcm, @krhubert, @srmo
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -2124,8 +2236,6 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)
|
||||
@@ -2143,8 +2253,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
|
||||
Special thanks to external contributors on this release: @katakonst
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic
|
||||
@@ -2318,8 +2426,6 @@ It also addresses some issues found via security audit, removes various unused
|
||||
functions from `libs/common`, and implements
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
@@ -2349,7 +2455,7 @@ FEATURES:
|
||||
- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.
|
||||
|
||||
IMPROVEMENTS:
|
||||
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialized (@bradyjoestar)
|
||||
- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make mempool cache a proper LRU (@bradyjoestar)
|
||||
- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
|
||||
- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted
|
||||
|
||||
@@ -4,50 +4,119 @@
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
@p4u from vocdoni.io reported that the mempool might behave incorrectly under a
|
||||
high load. The consequences can range from pauses between blocks to the peers
|
||||
disconnecting from this node. As a temporary remedy (until the mempool package
|
||||
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
|
||||
one by one without batching.
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
|
||||
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
|
||||
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
|
||||
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
|
||||
- [rpc] \#6019 Standardize RPC errors and return the correct status code (@bipulprasad & @cmwaters)
|
||||
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
|
||||
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
|
||||
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
|
||||
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
|
||||
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
|
||||
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
|
||||
- [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
|
||||
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
|
||||
- [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0
|
||||
- [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
|
||||
|
||||
- Apps
|
||||
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
||||
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
|
||||
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
|
||||
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
|
||||
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
|
||||
- It is no longer required to set ldflags to set version strings
|
||||
- [abci/counter] \#6684 Delete counter example app
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
- Go API
|
||||
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
|
||||
- [p2p] \#6618 Move `p2p.NodeInfo` into `types` to support use of the SDK. (@tychoish)
|
||||
- [p2p] \#6583 Make `p2p.NodeID` and `p2p.NetAddress` exported types to support their use in the RPC layer. (@tychoish)
|
||||
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
|
||||
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
|
||||
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
|
||||
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
|
||||
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
|
||||
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
|
||||
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
|
||||
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
|
||||
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
|
||||
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
|
||||
- [libs/os] `EnsureDir` now propagates IO errors and checks the file type (@erikgrinaker)
|
||||
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
|
||||
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
|
||||
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
|
||||
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
|
||||
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
|
||||
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
|
||||
- [all] \#6077 Change spelling from British English to American (@cmwaters)
|
||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
|
||||
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
|
||||
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
|
||||
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
|
||||
- [internal/libs] \#6366 Move `autofile`, `clist`, `fail`, `flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
|
||||
- [libs/rand] \#6364 Remove most of libs/rand in favor of the standard library's `math/rand` (@liamsi)
|
||||
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
|
||||
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
|
||||
and `TxInfo`. (@alexanderbez)
|
||||
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
||||
- [types] \#6627 Move `NodeKey` to types to make the type public.
|
||||
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
|
||||
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync`
|
||||
(@cmwaters)
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
- Data Storage
|
||||
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
|
||||
- [mempool] \#6396 Remove the mempool's write-ahead log (WAL), which was previously unused by the Tendermint code. (@tychoish)
|
||||
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
|
||||
|
||||
- Tooling
|
||||
- [tools] \#6498 Use the OS home directory instead of the hardcoded PATH. (@JayT106)
|
||||
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
|
||||
- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
|
||||
- [pex] \#6305 v2 pex reactor with backwards compatibility. Introduces two new pex messages to
|
||||
accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer
|
||||
exchange reactors behave the same. (@cmwaters)
|
||||
- [crypto] \#6376 Enable sr25519 as a validator key
|
||||
- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
|
||||
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
|
||||
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
|
||||
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
`mempool.version` configuration, where `v1` is the default configuration (see the sketch after this list).
|
||||
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
|
||||
- Transactions are gossiped in FIFO order as they are in `v0`.
|
||||
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
|
||||
- [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106)
|
||||
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
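For the prioritized mempool above, a minimal sketch of how an operator might check which reactor a node is using. This assumes the setting lives under a `[mempool]` section in `config.toml` and that `$TMHOME` points at the node's home directory:

```sh
# Sketch only: inspect the mempool section of the node's configuration.
# Set `version = "v0"` there to fall back to the legacy (FIFO) reactor,
# or keep the default `v1` to use the prioritized mempool.
grep -A 3 '^\[mempool\]' "$TMHOME/config/config.toml"
```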
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
|
||||
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
|
||||
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
|
||||
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
|
||||
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
|
||||
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
|
||||
- [privval] \#5725 Add gRPC support to private validator.
|
||||
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
|
||||
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
|
||||
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
|
||||
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
@@ -56,10 +125,37 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
|
||||
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
|
||||
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)
|
||||
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
|
||||
- [consensus] \#5987 Remove `time_iota_ms` from consensus params. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778)
|
||||
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
|
||||
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
|
||||
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
|
||||
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
|
||||
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
|
||||
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
|
||||
- [types] \#6120 use batch verification for verifying commits signatures.
|
||||
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
|
||||
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
|
||||
- [privval] \#6240 Add `context.Context` to privval interface.
|
||||
- [rpc] \#6265 set cache control in http-rpc response header (@JayT106)
|
||||
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
|
||||
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
|
||||
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
|
||||
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
|
||||
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
|
||||
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
|
||||
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
|
||||
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
|
||||
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
|
||||
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
|
||||
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
|
||||
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
|
||||
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
|
||||
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
|
||||
- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106)
|
||||
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
|
||||
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
|
||||
- [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman)
|
||||
CONTRIBUTING.md
@@ -26,7 +26,8 @@ will indicate their support with a heartfelt emoji.
|
||||
|
||||
If the issue would benefit from thorough discussion, maintainers may
|
||||
request that you create a [Request For
|
||||
Comment](https://github.com/tendermint/spec/tree/master/rfc)
|
||||
in the Tendermint spec repo. Discussion
|
||||
at the RFC stage will build collective understanding of the dimensions
|
||||
of the problems and help structure conversations around trade-offs.
|
||||
|
||||
@@ -108,37 +109,20 @@ We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
|
||||
|
||||
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
|
||||
|
||||
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`.
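As a quick sketch of the workflow described above (assuming Docker is running locally):

```sh
# Lint the proto files and check for breaking changes, then regenerate the stubs.
make proto-lint
make proto-check-breaking
make proto-gen
```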
|
||||
|
||||
### Visual Studio Code
|
||||
|
||||
If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:
|
||||
|
||||
```json
{
    "protoc": {
        "options": [
            "--proto_path=${workspaceRoot}/proto",
            "--proto_path=${workspaceRoot}/third_party/proto"
        ]
    }
}
```
|
||||
|
||||
@@ -243,125 +227,223 @@ Fixes #nnnn
|
||||
|
||||
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
|
||||
|
||||
### Release procedure
|
||||
|
||||
#### A note about backport branches
|
||||
Tendermint's `master` branch is under active development.
|
||||
Releases are specified using tags and are built from long-lived "backport" branches.
|
||||
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
|
||||
and the backport branches have names like `v0.34.x` or `v0.33.x`
|
||||
(literally, `x`; it is not a placeholder in this case).
|
||||
|
||||
As non-breaking changes land on `master`, they should also be backported (cherry-picked)
|
||||
to these backport branches.
|
||||
|
||||
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
|
||||
to the needed branch. There should be a label for any backport branch that you'll be targeting.
|
||||
To notify the bot to backport a pull request, mark the pull request with
|
||||
the label `S:backport-to-<backport_branch>`.
|
||||
Once the original pull request is merged, the bot will try to cherry-pick the pull request
|
||||
to the backport branch. If the bot fails to backport, it will open a pull request.
|
||||
The author of the original pull request is responsible for solving the conflicts and
|
||||
merging the pull request.
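The label can also be applied from the command line rather than the GitHub UI; a sketch, assuming the GitHub CLI (`gh`) is installed and authenticated, and using a hypothetical PR number:

```sh
# Hypothetical example: ask Mergify to backport PR #1234 to the v0.34.x branch once it merges.
gh pr edit 1234 --add-label "S:backport-to-v0.34.x"
```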
|
||||
|
||||
#### Creating a backport branch
|
||||
If this is the first release candidate for a major release, you get to have the honor of creating
|
||||
the backport branch!
|
||||
|
||||
Note that, after creating the backport branch, you'll also need to update the tags on `master`
|
||||
so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
|
||||
that is "greater than" the backport branches tags. See #6072 for more context.
|
||||
|
||||
In the following example, we'll assume that we're making a backport branch for
|
||||
the 0.35.x line.
|
||||
|
||||
1. Start on `master`
|
||||
2. Create the backport branch:
|
||||
`git checkout -b v0.35.x`
|
||||
3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
|
||||
`git tag -a v0.36.0-dev; git push origin v0.36.0-dev`
|
||||
4. Create a new workflow to run the e2e nightlies for this backport branch.
|
||||
(See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml
|
||||
for an example.)
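If the new workflow exposes a manual trigger, the nightlies can also be started from the command line instead of the GitHub UI; a sketch, assuming the GitHub CLI and the 0.34.x-style workflow name from the example above:

```sh
# Illustrative: manually run the e2e nightly workflow against the backport branch.
gh workflow run e2e-nightly-34x.yml --ref v0.34.x
```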
|
||||
|
||||
#### Release candidates
|
||||
|
||||
Before creating an official release, especially a major release, we may want to create a
|
||||
release candidate (RC) for our friends and partners to test out. We use git tags to
|
||||
create RCs, and we build them off of backport branches.
|
||||
|
||||
Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
|
||||
(for example, `v0.35.0-rc0`).
|
||||
|
||||
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
|
||||
have distinct names from the tags/release names.)
|
||||
|
||||
If this is the first RC for a major release, you'll have to make a new backport branch (see above).
|
||||
Otherwise:
|
||||
|
||||
1. Start from the backport branch (e.g. `v0.35.x`).
|
||||
1. Run the integration tests and the e2e nightlies
|
||||
(which can be triggered from the Github UI;
|
||||
e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
|
||||
1. Prepare the changelog:
|
||||
- Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`.
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
1. Open a PR with these changes against the backport branch.
|
||||
1. Once these changes have landed on the backport branch, be sure to pull them back down locally.
|
||||
2. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
|
||||
`git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
|
||||
3. Push the tag back up to origin:
|
||||
`git push origin v0.35.0-rc0`
|
||||
Now the tag should be available on the repo's releases page.
|
||||
4. Future RCs will continue to be built off of this branch.
|
||||
|
||||
Note that this process should only be used for "true" RCs--
|
||||
release candidates that, if successful, will be the next release.
|
||||
For more experimental "RCs," create a new, short-lived branch and tag that instead.
|
||||
|
||||
#### Major release
|
||||
|
||||
This major release process assumes that this release was preceded by release candidates.
|
||||
If there were no release candidates, begin by creating a backport branch, as described above.
|
||||
|
||||
1. Start on the backport branch (e.g. `v0.35.x`)
|
||||
2. Run integration tests and the e2e nightlies.
|
||||
3. Prepare the release:
|
||||
- "Squash" changes from the changelog entries for the RCs into a single entry,
|
||||
and add all changes included in `CHANGELOG_PENDING.md`.
|
||||
(Squashing includes both combining all entries, as well as removing or simplifying
|
||||
any intra-RC changes. It may also help to alphabetize the entries by package name.)
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes against the backport branch.
|
||||
5. Once these changes are on the backport branch, push a tag with prepared release details.
|
||||
This will trigger the actual release `v0.35.0`.
|
||||
- `git tag -a v0.35.0 -m 'Release v0.35.0'`
|
||||
- `git push origin v0.35.0`
|
||||
6. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
||||
|
||||
##### Major Release (from `master`)
|
||||
|
||||
1. Start on `master`
|
||||
2. Run integration tests (see `test_integrations` in Makefile)
|
||||
3. Prepare release in a pull request against `master` (to be squash merged):
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
|
||||
had release candidates, squash all the RC updates into one
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
|
||||
release, and add the github aliases of external contributors to the top of
|
||||
the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Make sure all significant breaking changes are covered in `UPGRADING.md`
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
5. Update the `CHANGELOG.md` file on master with the releases changelog.
|
||||
6. Delete any RC branches and tags for this release (if applicable)
|
||||
|
||||
#### Minor release (point releases)
|
||||
|
||||
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
|
||||
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
|
||||
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
|
||||
|
||||
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
|
||||
|
||||
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
|
||||
|
||||
To create a minor release:
|
||||
|
||||
1. Checkout the long-lived backport branch: `git checkout v0.35.x`
|
||||
2. Run integration tests (`make test_integrations`) and the nightlies.
|
||||
3. Check out a new branch and prepare the release:
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump the ABCI version number, if necessary.
|
||||
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
||||
which can change during minor releases, and only field additions are valid minor changes.)
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes that will land them back on `v0.35.x`
|
||||
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
||||
- `git tag -a v0.35.1 -m 'Release v0.35.1'`
|
||||
- `git push origin v0.35.1`
|
||||
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
|
||||
- Remove all `R:minor` labels from the pull requests that were included in the release.
|
||||
- Do not merge the backport branch into master.
|
||||
|
||||
||||
|
||||
## Testing
|
||||
|
||||
### Unit tests
|
||||
|
||||
Unit tests are located in `_test.go` files as directed by [the Go testing
|
||||
package](https://golang.org/pkg/testing/). If you're adding or removing a
|
||||
function, please check there's a `TestType_Method` test for it.
|
||||
|
||||
Run: `make test`
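During development it is often enough to run a narrower slice than the full suite; a sketch, with an illustrative package and test-name pattern:

```sh
# Run one package's unit tests with the race detector, or a single test by name.
go test -v -race ./types/...
go test -v -race -run 'TestBlock' ./types
```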
|
||||
|
||||
### Integration tests
|
||||
|
||||
Integration tests are also located in `_test.go` files. What differentiates
|
||||
them is a more complicated setup, which usually involves setting up two or more
|
||||
components.
|
||||
|
||||
Run: `make test_integrations`
|
||||
|
||||
### End-to-end tests
|
||||
|
||||
End-to-end tests are used to verify a fully integrated Tendermint network.
|
||||
|
||||
See [README](./test/e2e/README.md) for details.
|
||||
|
||||
Run:
|
||||
|
||||
```sh
|
||||
cd test/e2e && \
|
||||
make && \
|
||||
./build/runner -f networks/ci.toml
|
||||
```
|
||||
|
||||
### Model-based tests (ADVANCED)
|
||||
|
||||
*NOTE: if you're just submitting your first PR, you won't need to touch these
|
||||
most probably (99.9%)*.
|
||||
|
||||
For components, that have been [formally
|
||||
verified](https://en.wikipedia.org/wiki/Formal_verification) using
|
||||
[TLA+](https://en.wikipedia.org/wiki/TLA%2B), it may be possible to generate
|
||||
tests using a combination of the [Apalache Model
|
||||
Checker](https://apalache.informal.systems/) and [tendermint-rs testgen
|
||||
util](https://github.com/informalsystems/tendermint-rs/tree/master/testgen).
|
||||
|
||||
Now, I know there's a lot to take in. If you want to learn more, check out [
|
||||
this video](https://www.youtube.com/watch?v=aveoIMphzW8) by Andrey Kupriyanov
|
||||
& Igor Konnov.
|
||||
|
||||
At the moment, we have model-based tests for the light client, located in the
|
||||
`./light/mbt` directory.
|
||||
|
||||
Run: `cd light/mbt && go test`
|
||||
|
||||
### Fuzz tests (ADVANCED)
|
||||
|
||||
*NOTE: if you're just submitting your first PR, you won't need to touch these
|
||||
most probably (99.9%)*.
|
||||
|
||||
[Fuzz tests](https://en.wikipedia.org/wiki/Fuzzing) can be found inside the
|
||||
`./test/fuzz` directory. See [README.md](./test/fuzz/README.md) for details.
|
||||
|
||||
Run: `cd test/fuzz && make fuzz-{PACKAGE-COMPONENT}`
|
||||
|
||||
### Jepsen tests (ADVANCED)
|
||||
|
||||
*NOTE: if you're just submitting your first PR, you won't need to touch these
|
||||
most probably (99.9%)*.
|
||||
|
||||
[Jepsen](http://jepsen.io/) tests are used to verify the
|
||||
[linearizability](https://jepsen.io/consistency/models/linearizable) property
|
||||
of the Tendermint consensus. They are located in a separate repository
|
||||
-> <https://github.com/tendermint/jepsen>. Please refer to its README for more
|
||||
information.
|
||||
|
||||
### RPC Testing
|
||||
|
||||
**If you contribute to the RPC endpoints it's important to document your
|
||||
changes in the [Openapi file](./rpc/openapi/openapi.yaml)**.
|
||||
|
||||
To test your changes you must install `nodejs` and run:
|
||||
|
||||
```bash
|
||||
npm i -g dredd
|
||||
@@ -369,4 +451,8 @@ make build-linux build-contract-tests-hooks
|
||||
make contract-tests
|
||||
```
|
||||
|
||||
**WARNING: these are currently broken due to <https://github.com/apiaryio/dredd>
|
||||
not supporting complete OpenAPI 3**.
|
||||
|
||||
This command will spin up a network and check every endpoint against what has been documented.
|
||||
|
||||
@@ -1,4 +1,14 @@
|
||||
# stage 1 Generate Tendermint Binary
|
||||
FROM golang:1.16-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make
|
||||
COPY / /tendermint
|
||||
WORKDIR /tendermint
|
||||
RUN make build-linux
|
||||
|
||||
# stage 2
|
||||
FROM golang:1.15-alpine
|
||||
LABEL maintainer="hello@tendermint.com"
|
||||
|
||||
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
|
||||
@@ -29,15 +39,14 @@ EXPOSE 26656 26657 26660
|
||||
|
||||
STOPSIGNAL SIGTERM
|
||||
|
||||
COPY --from=builder /tendermint/build/tendermint /usr/bin/tendermint
|
||||
|
||||
# You can overwrite these before the first run to influence
|
||||
# config.json and genesis.json. Additionally, you can override
|
||||
# CMD to add parameters to `tendermint node`.
|
||||
ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain
|
||||
|
||||
COPY ./DOCKER/docker-entrypoint.sh /usr/local/bin/
|
||||
|
||||
ENTRYPOINT ["docker-entrypoint.sh"]
|
||||
CMD ["start"]
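A sketch of how these defaults might be overridden at run time; the values (and the published image tag) are illustrative:

```sh
# Illustrative: override the environment defaults above when starting the container.
docker run -it --rm \
  -e PROXY_APP=kvstore -e MONIKER=mynode -e CHAIN_ID=mychain \
  -v "/tmp:/tendermint" tendermint/tendermint start
```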
|
||||
|
||||
@@ -9,7 +9,7 @@ RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm &
|
||||
RUN yum -y groupinstall "Development Tools"
|
||||
RUN yum -y install leveldb-devel which
|
||||
|
||||
ENV GOVERSION=1.16.5
|
||||
|
||||
RUN cd /tmp && \
|
||||
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
|
||||
@@ -31,8 +31,8 @@ To get started developing applications, see the [application developers guide](h
|
||||
A quick example of a built-in app and Tendermint core in one container.
|
||||
|
||||
```sh
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init validator
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint start --proxy-app=kvstore
|
||||
```
|
||||
|
||||
## Local cluster
|
||||
|
||||
@@ -3,7 +3,7 @@ set -e
|
||||
|
||||
if [ ! -d "$TMHOME/config" ]; then
|
||||
echo "Running tendermint init to create (default) configuration for docker run."
|
||||
tendermint init validator
|
||||
|
||||
sed -i \
|
||||
-e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \
|
||||
|
||||
Makefile
@@ -12,7 +12,7 @@ else
|
||||
VERSION := $(shell git describe)
|
||||
endif
|
||||
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
|
||||
@@ -58,8 +58,6 @@ LD_FLAGS += $(LDFLAGS)
|
||||
all: check build test install
|
||||
.PHONY: all
|
||||
|
||||
# The below include contains the tools.
|
||||
include tools/Makefile
|
||||
include test/Makefile
|
||||
|
||||
###############################################################################
|
||||
@@ -85,18 +83,10 @@ proto-all: proto-gen proto-lint proto-check-breaking
|
||||
.PHONY: proto-all
|
||||
|
||||
proto-gen:
@docker pull -q tendermintdev/docker-build-proto
@echo "Generating Protobuf files"
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
.PHONY: proto-gen
|
||||
|
||||
proto-lint:
|
||||
@$(DOCKER_BUF) check lint --error-format=json
|
||||
@@ -212,7 +202,7 @@ format:
|
||||
|
||||
lint:
|
||||
@echo "--> Running linter"
|
||||
go run github.com/golangci/golangci-lint/cmd/golangci-lint run
|
||||
.PHONY: lint
|
||||
|
||||
DESTINATION = ./index.html.md
|
||||
@@ -241,12 +231,21 @@ build-docker: build-linux
|
||||
rm -rf DOCKER/tendermint
|
||||
.PHONY: build-docker
|
||||
|
||||
|
||||
###############################################################################
|
||||
### Mocks ###
|
||||
###############################################################################
|
||||
|
||||
mockery:
|
||||
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||
.PHONY: mockery
|
||||
|
||||
###############################################################################
|
||||
### Local testnet using docker ###
|
||||
###############################################################################
|
||||
|
||||
# Build linux binary on other platforms
|
||||
build-linux:
|
||||
GOOS=linux GOARCH=amd64 $(MAKE) build
|
||||
.PHONY: build-linux
|
||||
|
||||
|
||||
README.md
@@ -8,7 +8,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://pkg.go.dev/github.com/tendermint/tendermint)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/vcExX9T)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
@@ -18,8 +18,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|
||||
|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
|
||||
| master |  | [](https://codecov.io/gh/tendermint/tendermint) |  |
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
|
||||
and securely replicates it on many machines.
|
||||
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
|
||||
|
||||
For protocol details, see [the specification](https://github.com/tendermint/spec).
|
||||
|
||||
@@ -30,9 +29,7 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab
|
||||
|
||||
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
|
||||
Tendermint is being used in production in both private and public environments,
|
||||
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
|
||||
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
|
||||
Tendermint has been used in production in both private and public environments, most notably the blockchains of the Cosmos Network. We haven't released v1.0 yet, since we are still making breaking changes to the protocol and the APIs.
|
||||
See below for more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help. You can
|
||||
@@ -44,11 +41,14 @@ To report a security vulnerability, see our [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
|
||||
|
||||
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
|
||||
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
|
||||
|
||||
## Minimum requirements
|
||||
|
||||
| Requirement | Notes |
|
||||
|-------------|------------------|
|
||||
| Go version | Go1.15 or higher |
|
||||
| Go version | Go1.16 or higher |
|
||||
|
||||
## Documentation
|
||||
|
||||
@@ -164,4 +164,4 @@ If you'd like to work full-time on Tendermint Core, [we're hiring](https://inter
|
||||
|
||||
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
|
||||
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
|
||||
that also maintains [tendermint.com](https://tendermint.com).
|
||||
that also maintains [tendermint.com](https://tendermint.com).
|
||||
|
||||
SECURITY.md (78 changed lines)
@@ -7,54 +7,55 @@ Policy](https://tendermint.com/security), we operate a [bug
|
||||
bounty](https://hackerone.com/tendermint).
|
||||
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.
|
||||
|
||||
### Guidelines
|
||||
### Guidelines
|
||||
|
||||
We require that all researchers:
|
||||
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
|
||||
* Avoid posting personally identifiable information, privately or publicly
|
||||
|
||||
If you follow these guidelines when reporting an issue to us, we commit to:
|
||||
|
||||
* Not pursue or support any legal action related to your research on this vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
|
||||
|
||||
## Disclosure Process
|
||||
## Disclosure Process
|
||||
|
||||
Tendermint Core uses the following disclosure process:
|
||||
|
||||
1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
|
||||
1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
|
||||
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
|
||||
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.
|
||||
|
||||
This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
|
||||
### Example Timeline
|
||||
### Example Timeline
|
||||
|
||||
The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.
|
||||
The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.
|
||||
|
||||
#### > 24 Hours Before Release Time
|
||||
#### 24+ Hours Before Release Time
|
||||
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG)
|
||||
4. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Create patches in a private security repo, and ensure that PRs are open targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
|
||||
5. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
|
||||
#### 24 Hours Before Release Time
|
||||
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
|
||||
#### Release Time
|
||||
|
||||
@@ -64,36 +65,36 @@ The following is an example timeline for the triage and response. The required r
|
||||
4. Post “Security releases” on forum (TENDERMINT LEAD)
|
||||
5. Post new Tweet linking to forum post (COMMS LEAD)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)
|
||||
|
||||
#### After Release Time
|
||||
|
||||
1. Write forum post with exploit details (TENDERMINT LEAD)
|
||||
2. Approve pay-out on HackerOne for submitter (ADMIN)
|
||||
2. Approve pay-out on HackerOne for submitter (ADMIN)
|
||||
|
||||
#### 7 Days After Release Time
|
||||
|
||||
1. Publish CVE if it has not yet been published (ADMIN)
|
||||
1. Publish CVE if it has not yet been published (ADMIN)
|
||||
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
|
||||
## Supported Releases
|
||||
|
||||
The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running.
|
||||
The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running.
|
||||
|
||||
If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
|
||||
If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
|
||||
|
||||
## Scope
|
||||
|
||||
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
|
||||
|
||||
* Any third-party services
|
||||
* Findings from physical testing, such as office access
|
||||
* Any third-party services
|
||||
* Findings from physical testing, such as office access
|
||||
* Findings derived from social engineering (e.g., phishing)
|
||||
|
||||
## Example Vulnerabilities
|
||||
## Example Vulnerabilities
|
||||
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!
|
||||
|
||||
### Specification
|
||||
|
||||
@@ -114,6 +115,9 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):
|
||||
* A node halting (liveness failure)
|
||||
* Syncing new and old nodes
|
||||
|
||||
Assuming more than 1/3 the voting power is Byzantine:
|
||||
|
||||
* Attacks that go unpunished (unhandled evidence)
|
||||
|
||||
### Networking
|
||||
|
||||
@@ -139,7 +143,7 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
### Libraries
|
||||
|
||||
* Serialization (Amino)
|
||||
* Serialization
|
||||
* Reading/Writing files and databases
|
||||
|
||||
### Cryptography
|
||||
@@ -150,5 +154,5 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
### Light Client
|
||||
|
||||
* Core verification
|
||||
* Core verification
|
||||
* Bisection/sequential algorithms
|
||||
|
||||
UPGRADING.md (158 changed lines)
@@ -7,38 +7,132 @@ This guide provides instructions for upgrading to specific versions of Tendermin
|
||||
### ABCI Changes
|
||||
|
||||
* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid errors caused by unimplemented changes.
|
||||
|
||||
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in early ABCI implementations.
|
||||
|
||||
* Messages are written to a byte stream using uint64 length delimiters instead of int64.
|
||||
* When mempool `v1` is enabled, transactions broadcasted via `sync` mode may return a successful
|
||||
response with a transaction hash indicating that the transaction was successfully inserted into
|
||||
the mempool. While this is true for `v0`, the `v1` mempool reactor may at a later point in time
|
||||
evict or even drop this transaction after a hash has been returned. Thus, the user or client must
|
||||
query for that transaction to check if it is still in the mempool (see the sketch after this list).
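
A minimal sketch of the check described above, written against the `rpc/client.Client` interface: broadcast in `sync` mode, then scan the unconfirmed transactions to confirm the transaction was not evicted. The helper name and the `limit` page size are illustrative assumptions; construct the RPC client however your application already does.

```go
package example

import (
	"bytes"
	"context"
	"fmt"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/types"
)

// broadcastAndRecheck broadcasts tx in sync mode and then re-queries the
// mempool, since a successful sync response under mempool v1 only means the
// transaction passed CheckTx; it may still be evicted or dropped later.
func broadcastAndRecheck(ctx context.Context, c rpcclient.Client, tx types.Tx) (bool, error) {
	res, err := c.BroadcastTxSync(ctx, tx)
	if err != nil {
		return false, err
	}
	if res.Code != 0 {
		return false, fmt.Errorf("CheckTx failed: %s", res.Log)
	}

	// Scan the unconfirmed transactions for the returned hash. The page size
	// here is an arbitrary illustration; note that a transaction that has
	// already been committed will also no longer appear in this list.
	limit := 100
	unconfirmed, err := c.UnconfirmedTxs(ctx, &limit)
	if err != nil {
		return false, err
	}
	for _, utx := range unconfirmed.Txs {
		if bytes.Equal(utx.Hash(), res.Hash) {
			return true, nil // still pending in the mempool
		}
	}
	return false, nil // evicted, dropped, or already committed
}
```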
|
||||
|
||||
### Config Changes
|
||||
|
||||
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
|
||||
* `fast_sync = "v1"` and `fast_sync = "v2"` are no longer supported. Please use `v0` instead.
|
||||
|
||||
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node, make sure
|
||||
you have updated all the variables in your `config.toml` file.
|
||||
you have updated all the variables in your `config.toml` file.
|
||||
|
||||
* Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`)
|
||||
[ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
|
||||
|
||||
* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
|
||||
`Seeds`. Bootstrap peers are connected to on startup if needed for peer discovery. Unlike
|
||||
persistent peers, there's no guarantee that the node will remain connected with these peers.
|
||||
|
||||
* Configuration values starting with `priv-validator-` have moved to the new
|
||||
`priv-validator` section, without the `priv-validator-` prefix.
|
||||
|
||||
* The fast sync process, as well as the blockchain package and service, have all
|
||||
been renamed to block sync.
|
||||
|
||||
### Key Format Changes
|
||||
|
||||
The format of all tendermint on-disk database keys changes in
|
||||
0.35. Upgrading nodes must either re-sync all data or run a migration
|
||||
script provided in this release. The script located in
|
||||
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`
|
||||
provides the function `Migrate(context.Context, db.DB)` which you can
|
||||
operationalize as makes sense for your deployment.
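
For illustration, a sketch of running the migration programmatically; the GoLevelDB backend, the `"blockstore"` database name, and the error return of `Migrate` are assumptions here, so open whichever databases and backend your deployment actually uses.

```go
package example

import (
	"context"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

// migrateKeys opens one on-disk database and rewrites its keys into the new
// 0.35 format. Because the migration is idempotent, re-running it on an
// already-migrated database is safe.
func migrateKeys(ctx context.Context, dbDir string) error {
	db, err := dbm.NewGoLevelDB("blockstore", dbDir) // backend and name are assumptions
	if err != nil {
		return err
	}
	defer db.Close()

	return keymigrate.Migrate(ctx, db)
}
```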
|
||||
|
||||
For ease of use, the `tendermint` command includes a CLI version of the
|
||||
migration script, which you can invoke, as in:
|
||||
|
||||
tendermint key-migrate
|
||||
|
||||
This reads the configuration file as normal and allows the
|
||||
`--db-backend` and `--db-dir` flags to change database operations as
|
||||
needed.
|
||||
|
||||
The migration operation is idempotent and can be run more than once,
|
||||
if needed.
|
||||
|
||||
### CLI Changes
|
||||
|
||||
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
|
||||
|
||||
* If you had previously used `tendermint gen_node_key` to generate a new node
|
||||
key, keep in mind that it no longer saves the output to a file. You can use
|
||||
`tendermint init` or pipe the output of `tendermint gen_node_key` to
|
||||
`tendermint init validator` or pipe the output of `tendermint gen_node_key` to
|
||||
`$TMHOME/config/node_key.json`:
|
||||
|
||||
```
|
||||
$ tendermint gen_node_key > $TMHOME/config/node_key.json
|
||||
```
|
||||
|
||||
* CLI commands and flags are all now hyphen-case instead of snake_case.
|
||||
* CLI commands and flags are all now hyphen-case instead of snake_case.
|
||||
Make sure to adjust any scripts that call a CLI command using snake_case.
|
||||
|
||||
### API Changes
|
||||
|
||||
The p2p layer was reimplemented as part of the 0.35 release cycle, and
|
||||
all reactors were refactored. As part of that work these
|
||||
implementations moved into the `internal` package and are no longer
|
||||
considered part of the public Go API of tendermint. These packages
|
||||
are:
|
||||
|
||||
- `p2p`
|
||||
- `mempool`
|
||||
- `consensus`
|
||||
- `statesync`
|
||||
- `blockchain`
|
||||
- `evidence`
|
||||
|
||||
Accordingly, the `node` package was changed to reduce access to
|
||||
tendermint internals: applications that use tendermint as a library
|
||||
will need to change to accommodate these changes. Most notably:
|
||||
|
||||
- The `Node` type has become internal, and all constructors return a
|
||||
`service.Service` implementation.
|
||||
|
||||
- The `node.DefaultNewNode` and `node.NewNode` constructors are no
|
||||
longer exported and have been replaced with `node.New` and
|
||||
`node.NewDefault` which provide more functional interfaces.
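
Since callers now only see a `service.Service`, node lifecycle management reduces to the generic service methods. The sketch below assumes the node has already been constructed (the exact parameters of `node.New`/`node.NewDefault` are not shown in these notes) and only demonstrates the interface that library users are left with.

```go
package example

import (
	"github.com/tendermint/tendermint/libs/service"
)

// runNode operates a node purely through the service.Service interface, which
// is all the new constructors expose to library users.
func runNode(n service.Service) error {
	if err := n.Start(); err != nil {
		return err
	}
	defer func() {
		_ = n.Stop() // Stop returns an error if the service has already stopped
	}()

	n.Wait() // block until the node shuts down
	return nil
}
```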
|
||||
|
||||
### RPC changes
|
||||
|
||||
#### gRPC Support
|
||||
|
||||
Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.
|
||||
|
||||
#### Peer Management Interface
|
||||
|
||||
When running with the new P2P Layer, the `UnsafeDialSeeds` and
|
||||
`UnsafeDialPeers` RPC methods will always return an error. They are
|
||||
deprecated and will be removed in 0.36 when the legacy peer stack is
|
||||
removed.
|
||||
|
||||
Additionally the format of the Peer list returned in the `NetInfo`
|
||||
method changes in this release to accommodate the different way that
|
||||
the new stack tracks data about peers. This change affects users of
|
||||
both stacks.
|
||||
|
||||
### Support for Custom Reactor and Mempool Implementations
|
||||
|
||||
The changes to the p2p layer removed existing support for custom
|
||||
reactors. Based on our understanding of how this functionality was
|
||||
used, the introduction of the prioritized mempool covers nearly all of
|
||||
the use cases for custom reactors. If you are currently running custom
|
||||
reactors and mempools and are having trouble seeing the migration path
|
||||
for your project, please feel free to reach out to the Tendermint Core
|
||||
development team directly.
|
||||
|
||||
## v0.34.0
|
||||
|
||||
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
|
||||
This release is not compatible with previous blockchains due to changes to
|
||||
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
|
||||
|
||||
Note also that Tendermint 0.34 also requires Go 1.15 or higher.
|
||||
Note also that Tendermint 0.34 requires Go 1.16 or higher.
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
@@ -185,8 +279,8 @@ Other user-relevant changes include:
|
||||
|
||||
* The old `lite` package was removed; the new light client uses the `light` package.
|
||||
* The `Verifier` was broken up into two pieces:
|
||||
* Core verification logic (pure `VerifyX` functions)
|
||||
* `Client` object, which represents the complete light client
|
||||
* Core verification logic (pure `VerifyX` functions)
|
||||
* `Client` object, which represents the complete light client
|
||||
* The new light clients stores headers & validator sets as `LightBlock`s
|
||||
* The RPC client can be found in the `/rpc` directory.
|
||||
* The HTTP(S) proxy is located in the `/proxy` directory.
|
||||
@@ -318,12 +412,12 @@ Evidence Params has been changed to include duration.
|
||||
### Go API
|
||||
|
||||
* `libs/common` has been removed in favor of specific pkgs.
|
||||
* `async`
|
||||
* `service`
|
||||
* `rand`
|
||||
* `net`
|
||||
* `strings`
|
||||
* `cmap`
|
||||
* `async`
|
||||
* `service`
|
||||
* `rand`
|
||||
* `net`
|
||||
* `strings`
|
||||
* `cmap`
|
||||
* removal of `errors` pkg
|
||||
|
||||
### RPC Changes
|
||||
@@ -392,9 +486,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like:
|
||||
```go
|
||||
abci.ResponseDeliverTx{
|
||||
Tags: []kv.Pair{
|
||||
{Key: []byte("sender"), Value: []byte("foo")},
|
||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||
{Key: []byte("amount"), Value: []byte("35")},
|
||||
{Key: []byte("sender"), Value: []byte("foo")},
|
||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||
{Key: []byte("amount"), Value: []byte("35")},
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -413,14 +507,14 @@ the following `Events`:
|
||||
```go
|
||||
abci.ResponseDeliverTx{
|
||||
Events: []abci.Event{
|
||||
{
|
||||
Type: "transfer",
|
||||
Attributes: kv.Pairs{
|
||||
{Key: []byte("sender"), Value: []byte("foo")},
|
||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||
{Key: []byte("amount"), Value: []byte("35")},
|
||||
},
|
||||
}
|
||||
{
|
||||
Type: "transfer",
|
||||
Attributes: kv.Pairs{
|
||||
{Key: []byte("sender"), Value: []byte("foo")},
|
||||
{Key: []byte("recipient"), Value: []byte("bar")},
|
||||
{Key: []byte("amount"), Value: []byte("35")},
|
||||
},
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -468,9 +562,9 @@ In this case, the WS client will receive an error with description:
|
||||
"jsonrpc": "2.0",
|
||||
"id": "{ID}#event",
|
||||
"error": {
|
||||
"code": -32000,
|
||||
"msg": "Server error",
|
||||
"data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)"
|
||||
"code": -32000,
|
||||
"msg": "Server error",
|
||||
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -676,9 +770,9 @@ just the `Data` field set:
|
||||
|
||||
```go
|
||||
[]ProofOp{
|
||||
ProofOp{
|
||||
Data: <proof bytes>,
|
||||
}
|
||||
ProofOp{
|
||||
Data: <proof bytes>,
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -15,7 +15,7 @@ const (
|
||||
echoRetryIntervalSeconds = 1
|
||||
)
|
||||
|
||||
//go:generate mockery --case underscore --name Client
|
||||
//go:generate ../../scripts/mockery_generate.sh Client
|
||||
|
||||
// Client defines an interface for an ABCI client.
|
||||
//
|
||||
@@ -80,18 +80,14 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err
|
||||
return
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
type Callback func(*types.Request, *types.Response)
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
type ReqRes struct {
|
||||
*types.Request
|
||||
*sync.WaitGroup
|
||||
*types.Response // Not set atomically, so be sure to use WaitGroup.
|
||||
|
||||
mtx tmsync.Mutex
|
||||
mtx tmsync.RWMutex
|
||||
done bool // Gets set to true once *after* WaitGroup.Done().
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
}
|
||||
@@ -107,34 +103,50 @@ func NewReqRes(req *types.Request) *ReqRes {
|
||||
}
|
||||
}
|
||||
|
||||
// Sets the callback for this ReqRes atomically.
|
||||
// If reqRes is already done, calls cb immediately.
|
||||
// NOTE: reqRes.cb should not change if reqRes.done.
|
||||
// NOTE: only one callback is supported.
|
||||
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
|
||||
reqRes.mtx.Lock()
|
||||
// SetCallback sets the callback. If reqRes is already done, it will call the cb
|
||||
// immediately. Note, reqRes.cb should not change if reqRes.done and only one
|
||||
// callback is supported.
|
||||
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
|
||||
r.mtx.Lock()
|
||||
|
||||
if reqRes.done {
|
||||
reqRes.mtx.Unlock()
|
||||
cb(reqRes.Response)
|
||||
if r.done {
|
||||
r.mtx.Unlock()
|
||||
cb(r.Response)
|
||||
return
|
||||
}
|
||||
|
||||
reqRes.cb = cb
|
||||
reqRes.mtx.Unlock()
|
||||
r.cb = cb
|
||||
r.mtx.Unlock()
|
||||
}
|
||||
|
||||
func (reqRes *ReqRes) GetCallback() func(*types.Response) {
|
||||
reqRes.mtx.Lock()
|
||||
defer reqRes.mtx.Unlock()
|
||||
return reqRes.cb
|
||||
// InvokeCallback invokes a thread-safe execution of the configured callback
|
||||
// if non-nil.
|
||||
func (r *ReqRes) InvokeCallback() {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
if r.cb != nil {
|
||||
r.cb(r.Response)
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: it should be safe to read reqRes.cb without locks after this.
|
||||
func (reqRes *ReqRes) SetDone() {
|
||||
reqRes.mtx.Lock()
|
||||
reqRes.done = true
|
||||
reqRes.mtx.Unlock()
|
||||
// GetCallback returns the configured callback of the ReqRes object which may be
|
||||
// nil. Note, it is not safe to concurrently call this in cases where it is
|
||||
// marked done and SetCallback is called before calling GetCallback as that
|
||||
// will invoke the callback twice and create a potential race condition.
|
||||
//
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5439
|
||||
func (r *ReqRes) GetCallback() func(*types.Response) {
|
||||
r.mtx.RLock()
|
||||
defer r.mtx.RUnlock()
|
||||
return r.cb
|
||||
}
|
||||
|
||||
// SetDone marks the ReqRes object as done.
|
||||
func (r *ReqRes) SetDone() {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.done = true
|
||||
}
|
||||
|
||||
func waitGroup1() (wg *sync.WaitGroup) {
|
||||
|
||||
@@ -10,9 +10,9 @@ import (
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
// A gRPC client.
|
||||
@@ -24,7 +24,7 @@ type grpcClient struct {
|
||||
conn *grpc.ClientConn
|
||||
chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
|
||||
|
||||
mtx tmsync.Mutex
|
||||
mtx tmsync.RWMutex
|
||||
addr string
|
||||
err error
|
||||
resCb func(*types.Request, *types.Response) // listens to all callbacks
|
||||
@@ -149,8 +149,8 @@ func (cli *grpcClient) StopForError(err error) {
|
||||
}
|
||||
|
||||
func (cli *grpcClient) Error() error {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
cli.mtx.RLock()
|
||||
defer cli.mtx.RUnlock()
|
||||
return cli.err
|
||||
}
|
||||
|
||||
@@ -158,8 +158,8 @@ func (cli *grpcClient) Error() error {
|
||||
// NOTE: callback may get internally generated flush responses.
|
||||
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
cli.resCb = resCb
|
||||
cli.mtx.Unlock()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
@@ -4,8 +4,8 @@ import (
|
||||
"context"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
// NOTE: use defer to unlock mutex because Application might panic (e.g., in
|
||||
@@ -15,7 +15,7 @@ import (
|
||||
type localClient struct {
|
||||
service.BaseService
|
||||
|
||||
mtx *tmsync.Mutex
|
||||
mtx *tmsync.RWMutex
|
||||
types.Application
|
||||
Callback
|
||||
}
|
||||
@@ -26,22 +26,24 @@ var _ Client = (*localClient)(nil)
|
||||
// methods of the given app.
|
||||
//
|
||||
// Both Async and Sync methods ignore the given context.Context parameter.
|
||||
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
|
||||
func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client {
|
||||
if mtx == nil {
|
||||
mtx = new(tmsync.Mutex)
|
||||
mtx = &tmsync.RWMutex{}
|
||||
}
|
||||
|
||||
cli := &localClient{
|
||||
mtx: mtx,
|
||||
Application: app,
|
||||
}
|
||||
|
||||
cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
|
||||
return cli
|
||||
}
|
||||
|
||||
func (app *localClient) SetResponseCallback(cb Callback) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
app.Callback = cb
|
||||
app.mtx.Unlock()
|
||||
}
|
||||
|
||||
// TODO: change types.Application to include Error()?
|
||||
@@ -65,8 +67,8 @@ func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, err
|
||||
}
|
||||
|
||||
func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
app.mtx.RLock()
|
||||
defer app.mtx.RUnlock()
|
||||
|
||||
res := app.Application.Info(req)
|
||||
return app.callback(
|
||||
@@ -98,8 +100,8 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
|
||||
}
|
||||
|
||||
func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
app.mtx.RLock()
|
||||
defer app.mtx.RUnlock()
|
||||
|
||||
res := app.Application.Query(req)
|
||||
return app.callback(
|
||||
@@ -213,8 +215,8 @@ func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.Respon
|
||||
}
|
||||
|
||||
func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
app.mtx.RLock()
|
||||
defer app.mtx.RUnlock()
|
||||
|
||||
res := app.Application.Info(req)
|
||||
return &res, nil
|
||||
@@ -247,8 +249,8 @@ func (app *localClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
app.mtx.RLock()
|
||||
defer app.mtx.RUnlock()
|
||||
|
||||
res := app.Application.Query(req)
|
||||
return &res, nil
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
// Code generated by mockery v2.3.0. DO NOT EDIT.
|
||||
// Code generated by mockery. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
@@ -796,3 +796,8 @@ func (_m *Client) String() string {
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// Wait provides a mock function with given fields:
|
||||
func (_m *Client) Wait() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
@@ -12,10 +12,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
"github.com/tendermint/tendermint/internal/libs/timer"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/libs/timer"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -43,7 +43,7 @@ type socketClient struct {
|
||||
reqQueue chan *reqResWithContext
|
||||
flushTimer *timer.ThrottleTimer
|
||||
|
||||
mtx tmsync.Mutex
|
||||
mtx tmsync.RWMutex
|
||||
err error
|
||||
reqSent *list.List // list of requests sent, waiting for response
|
||||
resCb func(*types.Request, *types.Response) // called on all requests, if set.
|
||||
@@ -108,8 +108,8 @@ func (cli *socketClient) OnStop() {
|
||||
|
||||
// Error returns an error if the client was stopped abruptly.
|
||||
func (cli *socketClient) Error() error {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
cli.mtx.RLock()
|
||||
defer cli.mtx.RUnlock()
|
||||
return cli.err
|
||||
}
|
||||
|
||||
@@ -119,8 +119,8 @@ func (cli *socketClient) Error() error {
|
||||
// NOTE: callback may get internally generated flush responses.
|
||||
func (cli *socketClient) SetResponseCallback(resCb Callback) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
cli.resCb = resCb
|
||||
cli.mtx.Unlock()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
@@ -226,9 +226,7 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
//
|
||||
// NOTE: It is possible this callback isn't set on the reqres object at this
|
||||
// point, in which case it will be called later, when it is set.
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(res)
|
||||
}
|
||||
reqres.InvokeCallback()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -6,13 +6,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"math/rand"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
@@ -101,7 +102,7 @@ func TestHangingSyncCalls(t *testing.T) {
|
||||
func setupClientServer(t *testing.T, app types.Application) (
|
||||
service.Service, abcicli.Client) {
|
||||
// some port between 20k and 30k
|
||||
port := 20000 + tmrand.Int32()%10000
|
||||
port := 20000 + rand.Int31()%10000
|
||||
addr := fmt.Sprintf("localhost:%d", port)
|
||||
|
||||
s, err := server.NewServer(addr, "socket", app)
|
||||
|
||||
@@ -17,7 +17,6 @@ import (
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
"github.com/tendermint/tendermint/abci/server"
|
||||
servertest "github.com/tendermint/tendermint/abci/tests/server"
|
||||
@@ -47,9 +46,6 @@ var (
|
||||
flagHeight int
|
||||
flagProve bool
|
||||
|
||||
// counter
|
||||
flagSerial bool
|
||||
|
||||
// kvstore
|
||||
flagPersist string
|
||||
)
|
||||
@@ -61,19 +57,14 @@ var RootCmd = &cobra.Command{
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
switch cmd.Use {
|
||||
case "counter", "kvstore": // for the examples apps, don't pre-run
|
||||
return nil
|
||||
case "version": // skip running for version command
|
||||
case "kvstore", "version":
|
||||
return nil
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
allowLevel, err := log.AllowLevel(flagLogLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
}
|
||||
|
||||
if client == nil {
|
||||
var err error
|
||||
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
|
||||
@@ -138,10 +129,6 @@ func addQueryFlags() {
|
||||
"whether or not to return a merkle proof of the query result")
|
||||
}
|
||||
|
||||
func addCounterFlags() {
|
||||
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
|
||||
}
|
||||
|
||||
func addKVStoreFlags() {
|
||||
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
|
||||
}
|
||||
@@ -160,8 +147,6 @@ func addCommands() {
|
||||
RootCmd.AddCommand(queryCmd)
|
||||
|
||||
// examples
|
||||
addCounterFlags()
|
||||
RootCmd.AddCommand(counterCmd)
|
||||
addKVStoreFlags()
|
||||
RootCmd.AddCommand(kvstoreCmd)
|
||||
}
|
||||
@@ -261,14 +246,6 @@ var queryCmd = &cobra.Command{
|
||||
RunE: cmdQuery,
|
||||
}
|
||||
|
||||
var counterCmd = &cobra.Command{
|
||||
Use: "counter",
|
||||
Short: "ABCI demo example",
|
||||
Long: "ABCI demo example",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: cmdCounter,
|
||||
}
|
||||
|
||||
var kvstoreCmd = &cobra.Command{
|
||||
Use: "kvstore",
|
||||
Short: "ABCI demo example",
|
||||
@@ -596,34 +573,8 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdCounter(cmd *cobra.Command, args []string) error {
|
||||
app := counter.NewApplication(flagSerial)
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
if err := srv.Stop(); err != nil {
|
||||
logger.Error("Error while stopping server", "err", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
}
|
||||
|
||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
package counter
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
type Application struct {
|
||||
types.BaseApplication
|
||||
|
||||
hashCount int
|
||||
txCount int
|
||||
serial bool
|
||||
}
|
||||
|
||||
func NewApplication(serial bool) *Application {
|
||||
return &Application{serial: serial}
|
||||
}
|
||||
|
||||
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
|
||||
}
|
||||
|
||||
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
if app.serial {
|
||||
if len(req.Tx) > 8 {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
||||
}
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
if txValue != uint64(app.txCount) {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
|
||||
}
|
||||
}
|
||||
app.txCount++
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
if app.serial {
|
||||
if len(req.Tx) > 8 {
|
||||
return types.ResponseCheckTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
||||
}
|
||||
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
if txValue < uint64(app.txCount) {
|
||||
return types.ResponseCheckTx{
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
|
||||
}
|
||||
}
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
func (app *Application) Commit() (resp types.ResponseCommit) {
|
||||
app.hashCount++
|
||||
if app.txCount == 0 {
|
||||
return types.ResponseCommit{}
|
||||
}
|
||||
hash := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
|
||||
return types.ResponseCommit{Data: hash}
|
||||
}
|
||||
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
switch reqQuery.Path {
|
||||
case "hash":
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
|
||||
case "tx":
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
|
||||
default:
|
||||
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
|
||||
}
|
||||
}
|
||||
@@ -1,6 +1,8 @@
|
||||
package kvstore
|
||||
|
||||
import (
|
||||
mrand "math/rand"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
@@ -9,7 +11,8 @@ import (
|
||||
// from the input value
|
||||
func RandVal(i int) types.ValidatorUpdate {
|
||||
pubkey := tmrand.Bytes(32)
|
||||
power := tmrand.Uint16() + 1
|
||||
// Random value between [0, 2^16 - 1]
|
||||
power := mrand.Uint32() & (1<<16 - 1) // nolint:gosec // G404: Use of weak random number generator
|
||||
v := types.UpdateValidator(pubkey, int64(power), "")
|
||||
return v
|
||||
}
|
||||
|
||||
@@ -87,15 +87,16 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
|
||||
|
||||
// tx is either "key=value" or just arbitrary bytes
|
||||
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
var key, value []byte
|
||||
var key, value string
|
||||
|
||||
parts := bytes.Split(req.Tx, []byte("="))
|
||||
if len(parts) == 2 {
|
||||
key, value = parts[0], parts[1]
|
||||
key, value = string(parts[0]), string(parts[1])
|
||||
} else {
|
||||
key, value = req.Tx, req.Tx
|
||||
key, value = string(req.Tx), string(req.Tx)
|
||||
}
|
||||
|
||||
err := app.state.db.Set(prefixKey(key), value)
|
||||
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
@@ -105,10 +106,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
|
||||
{
|
||||
Type: "app",
|
||||
Attributes: []types.EventAttribute{
|
||||
{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko"), Index: true},
|
||||
{Key: []byte("key"), Value: key, Index: true},
|
||||
{Key: []byte("index_key"), Value: []byte("index is working"), Index: true},
|
||||
{Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false},
|
||||
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
|
||||
{Key: "key", Value: key, Index: true},
|
||||
{Key: "index_key", Value: "index is working", Index: true},
|
||||
{Key: "noindex_key", Value: "index is working", Index: false},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -51,6 +51,10 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
|
||||
}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) Close() error {
|
||||
return app.app.state.db.Close()
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
|
||||
app.logger = l
|
||||
}
|
||||
|
||||
@@ -9,10 +9,10 @@ import (
|
||||
"runtime"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
tmlog "github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
// var maxNumberConnections = 2
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
mrand "math/rand"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
@@ -18,7 +19,8 @@ func InitChain(client abcicli.Client) error {
|
||||
vals := make([]types.ValidatorUpdate, total)
|
||||
for i := 0; i < total; i++ {
|
||||
pubkey := tmrand.Bytes(33)
|
||||
power := tmrand.Int()
|
||||
// nolint:gosec // G404: Use of weak random number generator
|
||||
power := mrand.Int()
|
||||
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
|
||||
}
|
||||
_, err := client.InitChainSync(ctx, types.RequestInitChain{
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func startClient(abciType string) abcicli.Client {
|
||||
// Start client
|
||||
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
client.SetLogger(logger.With("module", "abcicli"))
|
||||
if err := client.Start(); err != nil {
|
||||
panicf("connecting to abci_app: %v", err.Error())
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
func commit(client abcicli.Client, hashExp []byte) {
|
||||
res, err := client.CommitSync(ctx)
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if !bytes.Equal(res.Data, hashExp) {
|
||||
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
|
||||
}
|
||||
}
|
||||
|
||||
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
|
||||
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if res.Code != codeExp {
|
||||
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
|
||||
}
|
||||
if !bytes.Equal(res.Data, dataExp) {
|
||||
panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
|
||||
}
|
||||
}
|
||||
|
||||
func panicf(format string, a ...interface{}) {
|
||||
panic(fmt.Sprintf(format, a...))
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
var abciType string
|
||||
|
||||
func init() {
|
||||
abciType = os.Getenv("ABCI")
|
||||
if abciType == "" {
|
||||
abciType = "socket"
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
testCounter()
|
||||
}
|
||||
|
||||
const (
|
||||
maxABCIConnectTries = 10
|
||||
)
|
||||
|
||||
func ensureABCIIsUp(typ string, n int) error {
|
||||
var err error
|
||||
cmdString := "abci-cli echo hello"
|
||||
if typ == "grpc" {
|
||||
cmdString = "abci-cli --abci grpc echo hello"
|
||||
}
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
cmd := exec.Command("bash", "-c", cmdString)
|
||||
_, err = cmd.CombinedOutput()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func testCounter() {
|
||||
abciApp := os.Getenv("ABCI_APP")
|
||||
if abciApp == "" {
|
||||
panic("No ABCI_APP specified")
|
||||
}
|
||||
|
||||
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
|
||||
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
|
||||
cmd := exec.Command("bash", "-c", subCommand)
|
||||
cmd.Stdout = os.Stdout
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Fatalf("starting %q err: %v", abciApp, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := cmd.Process.Kill(); err != nil {
|
||||
log.Printf("error on process kill: %v", err)
|
||||
}
|
||||
if err := cmd.Wait(); err != nil {
|
||||
log.Printf("error while waiting for cmd to exit: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
|
||||
log.Fatalf("echo failed: %v", err) //nolint:gocritic
|
||||
}
|
||||
|
||||
client := startClient(abciType)
|
||||
defer func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
log.Printf("error trying client stop: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// commit(client, nil)
|
||||
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
|
||||
commit(client, nil)
|
||||
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
|
||||
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
|
||||
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
#! /bin/bash
|
||||
set -e
|
||||
|
||||
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
|
||||
|
||||
# Get the directory of where this script is.
|
||||
export PATH="$GOBIN:$PATH"
|
||||
SOURCE="${BASH_SOURCE[0]}"
|
||||
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
|
||||
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
|
||||
|
||||
# Change into that dir because we expect that.
|
||||
cd "$DIR"
|
||||
|
||||
echo "RUN COUNTER OVER SOCKET"
|
||||
# test golang counter
|
||||
ABCI_APP="counter" go run -mod=readonly ./*.go
|
||||
echo "----------------------"
|
||||
|
||||
|
||||
echo "RUN COUNTER OVER GRPC"
|
||||
# test golang counter via grpc
|
||||
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
|
||||
echo "----------------------"
|
||||
|
||||
# test nodejs counter
|
||||
# TODO: fix node app
|
||||
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
|
||||
@@ -37,7 +37,6 @@ function testExample() {
|
||||
}
|
||||
|
||||
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
|
||||
testExample 2 tests/test_cli/ex2.abci abci-cli counter
|
||||
|
||||
echo ""
|
||||
echo "PASS"
|
||||
|
||||
@@ -4,7 +4,7 @@ import (
|
||||
"io"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/tendermint/tendermint/libs/protoio"
|
||||
"github.com/tendermint/tendermint/internal/libs/protoio"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -15,11 +15,7 @@ const (
|
||||
func WriteMessage(msg proto.Message, w io.Writer) error {
|
||||
protoWriter := protoio.NewDelimitedWriter(w)
|
||||
_, err := protoWriter.WriteMsg(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
return err
|
||||
}
|
||||
|
||||
// ReadMessage reads a varint length-delimited protobuf message.
|
||||
|
||||
@@ -25,7 +25,7 @@ func TestMarshalJSON(t *testing.T) {
|
||||
{
|
||||
Type: "testEvent",
|
||||
Attributes: []EventAttribute{
|
||||
{Key: []byte("pho"), Value: []byte("bo")},
|
||||
{Key: "pho", Value: "bo"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -92,7 +92,7 @@ func TestWriteReadMessage2(t *testing.T) {
|
||||
{
|
||||
Type: "testEvent",
|
||||
Attributes: []EventAttribute{
|
||||
{Key: []byte("abc"), Value: []byte("def")},
|
||||
{Key: "abc", Value: "def"},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/crypto/secp256k1"
|
||||
"github.com/tendermint/tendermint/crypto/sr25519"
|
||||
)
|
||||
|
||||
func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
|
||||
@@ -17,7 +18,6 @@ func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
|
||||
}
|
||||
|
||||
return ValidatorUpdate{
|
||||
// Address:
|
||||
PubKey: pkp,
|
||||
Power: power,
|
||||
}
|
||||
@@ -34,7 +34,16 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
|
||||
panic(err)
|
||||
}
|
||||
return ValidatorUpdate{
|
||||
// Address:
|
||||
PubKey: pkp,
|
||||
Power: power,
|
||||
}
|
||||
case sr25519.KeyType:
|
||||
pke := sr25519.PubKey(pk)
|
||||
pkp, err := cryptoenc.PubKeyToProto(pke)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ValidatorUpdate{
|
||||
PubKey: pkp,
|
||||
Power: power,
|
||||
}
|
||||
|
||||
@@ -42,7 +42,7 @@ func (r ResponseQuery) IsErr() bool {
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// override JSON marshalling so we emit defaults (ie. disable omitempty)
|
||||
// override JSON marshaling so we emit defaults (ie. disable omitempty)
|
||||
|
||||
var (
|
||||
jsonpbMarshaller = jsonpb.Marshaler{
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,49 +0,0 @@
|
||||
package behaviour
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// PeerBehaviour is a struct describing a behaviour a peer performed.
|
||||
// `peerID` identifies the peer and reason characterizes the specific
|
||||
// behaviour performed by the peer.
|
||||
type PeerBehaviour struct {
|
||||
peerID p2p.NodeID
|
||||
reason interface{}
|
||||
}
|
||||
|
||||
type badMessage struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
// BadMessage returns a badMessage PeerBehaviour.
|
||||
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
|
||||
}
|
||||
|
||||
type messageOutOfOrder struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
// MessageOutOfOrder returns a messagOutOfOrder PeerBehaviour.
|
||||
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
|
||||
}
|
||||
|
||||
type consensusVote struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
// ConsensusVote returns a consensusVote PeerBehaviour.
|
||||
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
|
||||
}
|
||||
|
||||
type blockPart struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
// BlockPart returns blockPart PeerBehaviour.
|
||||
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
|
||||
}
|
||||
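A hedged usage sketch for the constructors above, reporting a few behaviours for one peer and reading them back with the MockReporter that appears in the test file below. The peer ID and explanations are made up for the example.

// Illustrative only: report two behaviours for a peer and read them back.
reporter := behaviour.NewMockReporter()
if err := reporter.Report(behaviour.BadMessage("peer-1", "malformed proposal")); err != nil {
	panic(err)
}
if err := reporter.Report(behaviour.ConsensusVote("peer-1", "voted")); err != nil {
	panic(err)
}
reported := reporter.GetBehaviours("peer-1") // two entries for this peer
_ = reported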
@@ -1,205 +0,0 @@
|
||||
package behaviour_test
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
bh "github.com/tendermint/tendermint/behaviour"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// TestMockReporter tests the MockReporter's ability to store reported
|
||||
// peer behaviour in memory indexed by the peerID.
|
||||
func TestMockReporter(t *testing.T) {
|
||||
var peerID p2p.NodeID = "MockPeer"
|
||||
pr := bh.NewMockReporter()
|
||||
|
||||
behaviours := pr.GetBehaviours(peerID)
|
||||
if len(behaviours) != 0 {
|
||||
t.Error("Expected to have no behaviours reported")
|
||||
}
|
||||
|
||||
badMessage := bh.BadMessage(peerID, "bad message")
|
||||
if err := pr.Report(badMessage); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
behaviours = pr.GetBehaviours(peerID)
|
||||
if len(behaviours) != 1 {
|
||||
t.Error("Expected the peer have one reported behaviour")
|
||||
}
|
||||
|
||||
if behaviours[0] != badMessage {
|
||||
t.Error("Expected Bad Message to have been reported")
|
||||
}
|
||||
}
|
||||
|
||||
type scriptItem struct {
|
||||
peerID p2p.NodeID
|
||||
behaviour bh.PeerBehaviour
|
||||
}
|
||||
|
||||
// equalBehaviours returns true if a and b contain the same PeerBehaviours with
|
||||
// the same frequencies and otherwise false.
|
||||
func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
|
||||
aHistogram := map[bh.PeerBehaviour]int{}
|
||||
bHistogram := map[bh.PeerBehaviour]int{}
|
||||
|
||||
for _, behaviour := range a {
|
||||
aHistogram[behaviour]++
|
||||
}
|
||||
|
||||
for _, behaviour := range b {
|
||||
bHistogram[behaviour]++
|
||||
}
|
||||
|
||||
if len(aHistogram) != len(bHistogram) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, behaviour := range a {
|
||||
if aHistogram[behaviour] != bHistogram[behaviour] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, behaviour := range b {
|
||||
if bHistogram[behaviour] != aHistogram[behaviour] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TestEqualPeerBehaviours tests that equalBehaviours correctly compares two
// slices of peer behaviours by the behaviours they contain and the
// frequencies with which those behaviours occur.
|
||||
func TestEqualPeerBehaviours(t *testing.T) {
|
||||
var (
|
||||
peerID p2p.NodeID = "MockPeer"
|
||||
consensusVote = bh.ConsensusVote(peerID, "voted")
|
||||
blockPart = bh.BlockPart(peerID, "blocked")
|
||||
equals = []struct {
|
||||
left []bh.PeerBehaviour
|
||||
right []bh.PeerBehaviour
|
||||
}{
|
||||
// Empty sets
|
||||
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
|
||||
// Single behaviours
|
||||
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
|
||||
// Equal Frequencies
|
||||
{[]bh.PeerBehaviour{consensusVote, consensusVote},
|
||||
[]bh.PeerBehaviour{consensusVote, consensusVote}},
|
||||
// Equal frequencies different orders
|
||||
{[]bh.PeerBehaviour{consensusVote, blockPart},
|
||||
[]bh.PeerBehaviour{blockPart, consensusVote}},
|
||||
}
|
||||
unequals = []struct {
|
||||
left []bh.PeerBehaviour
|
||||
right []bh.PeerBehaviour
|
||||
}{
|
||||
// Comparing empty sets to non empty sets
|
||||
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
|
||||
// Different behaviours
|
||||
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
|
||||
// Same behaviour with different frequencies
|
||||
{[]bh.PeerBehaviour{consensusVote},
|
||||
[]bh.PeerBehaviour{consensusVote, consensusVote}},
|
||||
}
|
||||
)
|
||||
|
||||
for _, test := range equals {
|
||||
if !equalBehaviours(test.left, test.right) {
|
||||
t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
|
||||
}
|
||||
}
|
||||
|
||||
for _, test := range unequals {
|
||||
if equalBehaviours(test.left, test.right) {
|
||||
t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestMockPeerBehaviourReporterConcurrency constructs a scenario in which
// multiple goroutines use the same MockReporter instance. This test
// reproduces the conditions under which MockReporter will be used within a
// Reactor's `Receive` method, to ensure thread safety.
|
||||
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
|
||||
var (
|
||||
behaviourScript = []struct {
|
||||
peerID p2p.NodeID
|
||||
behaviours []bh.PeerBehaviour
|
||||
}{
|
||||
{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
|
||||
{"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
|
||||
{
|
||||
"3",
|
||||
[]bh.PeerBehaviour{bh.BlockPart("3", ""),
|
||||
bh.ConsensusVote("3", ""),
|
||||
bh.BlockPart("3", ""),
|
||||
bh.ConsensusVote("3", "")}},
|
||||
{
|
||||
"4",
|
||||
[]bh.PeerBehaviour{bh.ConsensusVote("4", ""),
|
||||
bh.ConsensusVote("4", ""),
|
||||
bh.ConsensusVote("4", ""),
|
||||
bh.ConsensusVote("4", "")}},
|
||||
{
|
||||
"5",
|
||||
[]bh.PeerBehaviour{bh.BlockPart("5", ""),
|
||||
bh.ConsensusVote("5", ""),
|
||||
bh.BlockPart("5", ""),
|
||||
bh.ConsensusVote("5", "")}},
|
||||
}
|
||||
)
|
||||
|
||||
var receiveWg sync.WaitGroup
|
||||
pr := bh.NewMockReporter()
|
||||
scriptItems := make(chan scriptItem)
|
||||
done := make(chan int)
|
||||
numConsumers := 3
|
||||
for i := 0; i < numConsumers; i++ {
|
||||
receiveWg.Add(1)
|
||||
go func() {
|
||||
defer receiveWg.Done()
|
||||
for {
|
||||
select {
|
||||
case pb := <-scriptItems:
|
||||
if err := pr.Report(pb.behaviour); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
var sendingWg sync.WaitGroup
|
||||
sendingWg.Add(1)
|
||||
go func() {
|
||||
defer sendingWg.Done()
|
||||
for _, item := range behaviourScript {
|
||||
for _, reason := range item.behaviours {
|
||||
scriptItems <- scriptItem{item.peerID, reason}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
sendingWg.Wait()
|
||||
|
||||
for i := 0; i < numConsumers; i++ {
|
||||
done <- 1
|
||||
}
|
||||
|
||||
receiveWg.Wait()
|
||||
|
||||
for _, items := range behaviourScript {
|
||||
reported := pr.GetBehaviours(items.peerID)
|
||||
if !equalBehaviours(reported, items.behaviours) {
|
||||
t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
|
||||
items.peerID, items.behaviours, reported)
|
||||
}
|
||||
}
|
||||
}
|
||||
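The concurrency test above exercises a reporter shared by several goroutines. Below is a minimal sketch of such a reporter, written as a mutex-protected map; the type and function names are local to the sketch (it needs the standard "sync" import) and are not the package's actual API.

// Illustrative only: a thread-safe in-memory reporter in the spirit of MockReporter.
type report struct {
	peerID string
	reason interface{}
}

type memReporter struct {
	mtx     sync.Mutex
	reports map[string][]report
}

func newMemReporter() *memReporter {
	return &memReporter{reports: map[string][]report{}}
}

func (r *memReporter) Report(rep report) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.reports[rep.peerID] = append(r.reports[rep.peerID], rep)
}

func (r *memReporter) Get(peerID string) []report {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	// return a copy so callers cannot mutate the stored slice
	return append([]report(nil), r.reports[peerID]...)
}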
@@ -1,17 +0,0 @@
|
||||
/*
|
||||
Package blockchain provides two implementations of the fast-sync protocol.
|
||||
|
||||
- v0 was the very first implementation. It is battle-tested, but does not have a
|
||||
lot of test coverage.
|
||||
- v2 is the newest implementation, with a focus on testability and readability.
|
||||
|
||||
Check out ADR-40 for the formal model and requirements.
|
||||
|
||||
# Termination criteria
|
||||
|
||||
1. the maximum peer height is reached
|
||||
2. termination timeout is triggered, which is set if the peer set is empty or
|
||||
there are no pending requests.
|
||||
|
||||
*/
|
||||
package blockchain
|
||||
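The doc comment above lists two termination criteria for fast sync. The function below is a hypothetical sketch of that decision; every parameter name is an assumption made for the example, not the package's real API (it would need the standard "time" import).

// Illustrative only: decide whether fast sync should stop, per the criteria above.
func shouldTerminate(height, maxPeerHeight int64, numPeers, pendingRequests int, sinceLastAdvance, timeout time.Duration) bool {
	if maxPeerHeight > 0 && height >= maxPeerHeight {
		return true // criterion 1: reached the maximum peer height
	}
	if (numPeers == 0 || pendingRequests == 0) && sinceLastAdvance > timeout {
		return true // criterion 2: timeout with an empty peer set or no pending requests
	}
	return false
}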
@@ -1,108 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// NOTE: keep up to date with bcproto.BlockResponse
|
||||
BlockResponseMessagePrefixSize = 4
|
||||
BlockResponseMessageFieldKeySize = 1
|
||||
MaxMsgSize = types.MaxBlockSizeBytes +
|
||||
BlockResponseMessagePrefixSize +
|
||||
BlockResponseMessageFieldKeySize
|
||||
)
|
||||
|
||||
// EncodeMsg encodes a Protobuf message
|
||||
func EncodeMsg(pb proto.Message) ([]byte, error) {
|
||||
msg := bcproto.Message{}
|
||||
|
||||
switch pb := pb.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
|
||||
case *bcproto.BlockResponse:
|
||||
msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
|
||||
case *bcproto.NoBlockResponse:
|
||||
msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
|
||||
case *bcproto.StatusRequest:
|
||||
msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
|
||||
case *bcproto.StatusResponse:
|
||||
msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown message type %T", pb)
|
||||
}
|
||||
|
||||
bz, err := proto.Marshal(&msg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
|
||||
}
|
||||
|
||||
return bz, nil
|
||||
}
|
||||
|
||||
// DecodeMsg decodes a Protobuf message.
|
||||
func DecodeMsg(bz []byte) (proto.Message, error) {
|
||||
pb := &bcproto.Message{}
|
||||
|
||||
err := proto.Unmarshal(bz, pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch msg := pb.Sum.(type) {
|
||||
case *bcproto.Message_BlockRequest:
|
||||
return msg.BlockRequest, nil
|
||||
case *bcproto.Message_BlockResponse:
|
||||
return msg.BlockResponse, nil
|
||||
case *bcproto.Message_NoBlockResponse:
|
||||
return msg.NoBlockResponse, nil
|
||||
case *bcproto.Message_StatusRequest:
|
||||
return msg.StatusRequest, nil
|
||||
case *bcproto.Message_StatusResponse:
|
||||
return msg.StatusResponse, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown message type %T", msg)
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateMsg validates a message.
|
||||
func ValidateMsg(pb proto.Message) error {
|
||||
if pb == nil {
|
||||
return errors.New("message cannot be nil")
|
||||
}
|
||||
|
||||
switch msg := pb.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
if msg.Height < 0 {
|
||||
return errors.New("negative Height")
|
||||
}
|
||||
case *bcproto.BlockResponse:
|
||||
// validate basic is called later when converting from proto
|
||||
return nil
|
||||
case *bcproto.NoBlockResponse:
|
||||
if msg.Height < 0 {
|
||||
return errors.New("negative Height")
|
||||
}
|
||||
case *bcproto.StatusResponse:
|
||||
if msg.Base < 0 {
|
||||
return errors.New("negative Base")
|
||||
}
|
||||
if msg.Height < 0 {
|
||||
return errors.New("negative Height")
|
||||
}
|
||||
if msg.Base > msg.Height {
|
||||
return fmt.Errorf("base %v cannot be greater than height %v", msg.Base, msg.Height)
|
||||
}
|
||||
case *bcproto.StatusRequest:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unknown message type %T", msg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
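A hedged round-trip example for the removed helpers above (EncodeMsg, DecodeMsg, ValidateMsg), assuming the bcproto import alias used in that file; the height value is made up for the example.

// Illustrative only: encode a BlockRequest, decode it back, and validate it.
req := &bcproto.BlockRequest{Height: 5}
bz, err := EncodeMsg(req)
if err != nil {
	panic(err)
}
msg, err := DecodeMsg(bz)
if err != nil {
	panic(err)
}
if err := ValidateMsg(msg); err != nil {
	panic(err)
}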
@@ -1,461 +0,0 @@
|
||||
package v0
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
|
||||
BlockchainChannel = byte(0x40)
|
||||
|
||||
trySyncIntervalMS = 10
|
||||
|
||||
// stop syncing when last block's time is
|
||||
// within this much of the system time.
|
||||
// stopSyncingDurationMinutes = 10
|
||||
|
||||
// ask for best height every 10s
|
||||
statusUpdateIntervalSeconds = 10
|
||||
// check if we should switch to consensus reactor
|
||||
switchToConsensusIntervalSeconds = 1
|
||||
|
||||
// switch to consensus after this duration of inactivity
|
||||
syncTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
// for when we switch from blockchain reactor and fast sync to
|
||||
// the consensus machine
|
||||
SwitchToConsensus(state sm.State, skipWAL bool)
|
||||
}
|
||||
|
||||
type peerError struct {
|
||||
err error
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e peerError) Error() string {
|
||||
return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
|
||||
}
|
||||
|
||||
// BlockchainReactor handles long-term catchup syncing.
|
||||
type BlockchainReactor struct {
|
||||
p2p.BaseReactor
|
||||
|
||||
// immutable
|
||||
initialState sm.State
|
||||
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
pool *BlockPool
|
||||
fastSync bool
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
errorsCh <-chan peerError
|
||||
}
|
||||
|
||||
// NewBlockchainReactor returns new reactor instance.
|
||||
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
|
||||
fastSync bool) *BlockchainReactor {
|
||||
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
store.Height()))
|
||||
}
|
||||
|
||||
requestsCh := make(chan BlockRequest, maxTotalRequesters)
|
||||
|
||||
const capacity = 1000 // must be bigger than peers count
|
||||
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
|
||||
|
||||
startHeight := store.Height() + 1
|
||||
if startHeight == 1 {
|
||||
startHeight = state.InitialHeight
|
||||
}
|
||||
pool := NewBlockPool(startHeight, requestsCh, errorsCh)
|
||||
|
||||
bcR := &BlockchainReactor{
|
||||
initialState: state,
|
||||
blockExec: blockExec,
|
||||
store: store,
|
||||
pool: pool,
|
||||
fastSync: fastSync,
|
||||
requestsCh: requestsCh,
|
||||
errorsCh: errorsCh,
|
||||
}
|
||||
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
|
||||
return bcR
|
||||
}
|
||||
|
||||
// SetLogger implements service.Service by setting the logger on reactor and pool.
|
||||
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
|
||||
bcR.BaseService.Logger = l
|
||||
bcR.pool.Logger = l
|
||||
}
|
||||
|
||||
// OnStart implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStart() error {
|
||||
if bcR.fastSync {
|
||||
err := bcR.pool.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go bcR.poolRoutine(false)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
|
||||
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
|
||||
bcR.fastSync = true
|
||||
bcR.initialState = state
|
||||
|
||||
bcR.pool.height = state.LastBlockHeight + 1
|
||||
err := bcR.pool.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go bcR.poolRoutine(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStop() {
|
||||
if bcR.fastSync {
|
||||
if err := bcR.pool.Stop(); err != nil {
|
||||
bcR.Logger.Error("Error stopping pool", "err", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetChannels implements Reactor
|
||||
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: BlockchainChannel,
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 1000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height()})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
_ = peer.Send(BlockchainChannel, msgBytes)
|
||||
// It's OK if the send fails; we will try again later in poolRoutine.
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
// bcStatusResponseMessage from the peer and call pool.SetPeerRange
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor by removing peer from the pool.
|
||||
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
bcR.pool.RemovePeer(peer.ID())
|
||||
}
|
||||
|
||||
// respondToPeer loads a block and sends it to the requesting peer,
|
||||
// if we have it. Otherwise, we'll respond saying we don't have it.
|
||||
func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest,
|
||||
src p2p.Peer) (queued bool) {
|
||||
|
||||
block := bcR.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
bl, err := block.ToProto()
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
// XXX: do not call any methods that can block or incur heavy processing.
|
||||
// https://github.com/tendermint/tendermint/issues/2888
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
logger := bcR.Logger.With("src", src, "chId", chID)
|
||||
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
logger.Error("Error decoding message", "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
logger.Error("Peer sent us invalid msg", "msg", msg, "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("Receive", "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
bcR.respondToPeer(msg, src)
|
||||
case *bcproto.BlockResponse:
|
||||
bi, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
logger.Error("Block content is invalid", "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Height: bcR.store.Height(),
|
||||
Base: bcR.store.Base(),
|
||||
})
|
||||
if err != nil {
|
||||
logger.Error("could not convert msg to protobut", "err", err)
|
||||
return
|
||||
}
|
||||
src.TrySend(BlockchainChannel, msgBytes)
|
||||
case *bcproto.StatusResponse:
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
|
||||
case *bcproto.NoBlockResponse:
|
||||
logger.Debug("Peer does not have requested block", "height", msg.Height)
|
||||
default:
|
||||
logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
}
|
||||
|
||||
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
|
||||
var (
|
||||
trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
statusUpdateTicker = time.NewTicker(statusUpdateIntervalSeconds * time.Second)
|
||||
switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
|
||||
|
||||
blocksSynced = uint64(0)
|
||||
|
||||
chainID = bcR.initialState.ChainID
|
||||
state = bcR.initialState
|
||||
|
||||
lastHundred = time.Now()
|
||||
lastRate = 0.0
|
||||
|
||||
didProcessCh = make(chan struct{}, 1)
|
||||
)
|
||||
defer trySyncTicker.Stop()
|
||||
defer statusUpdateTicker.Stop()
|
||||
defer switchToConsensusTicker.Stop()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
|
||||
case <-bcR.Quit():
|
||||
return
|
||||
|
||||
case <-bcR.pool.Quit():
|
||||
return
|
||||
|
||||
case request := <-bcR.requestsCh:
|
||||
peer := bcR.Switch.Peers().Get(request.PeerID)
|
||||
if peer == nil {
|
||||
bcR.Logger.Debug("Can't send request: no peer", "peer_id", request.PeerID)
|
||||
continue
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert BlockRequest to proto", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
|
||||
}
|
||||
|
||||
case err := <-bcR.errorsCh:
|
||||
peer := bcR.Switch.Peers().Get(err.peerID)
|
||||
if peer != nil {
|
||||
bcR.Switch.StopPeerForError(peer, err)
|
||||
}
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest()
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
FOR_LOOP:
|
||||
for {
|
||||
select {
|
||||
|
||||
case <-switchToConsensusTicker.C:
|
||||
var (
|
||||
height, numPending, lenRequesters = bcR.pool.GetStatus()
|
||||
outbound, inbound, _ = bcR.Switch.NumPeers()
|
||||
lastAdvance = bcR.pool.LastAdvance()
|
||||
)
|
||||
|
||||
bcR.Logger.Debug("Consensus ticker",
|
||||
"numPending", numPending,
|
||||
"total", lenRequesters)
|
||||
|
||||
switch {
|
||||
case bcR.pool.IsCaughtUp():
|
||||
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
|
||||
case time.Since(lastAdvance) > syncTimeout:
|
||||
bcR.Logger.Error(fmt.Sprintf("No progress since last advance: %v", lastAdvance))
|
||||
default:
|
||||
bcR.Logger.Info("Not caught up yet",
|
||||
"height", height, "max_peer_height", bcR.pool.MaxPeerHeight(),
|
||||
"num_peers", outbound+inbound,
|
||||
"timeout_in", syncTimeout-time.Since(lastAdvance))
|
||||
continue
|
||||
}
|
||||
|
||||
if err := bcR.pool.Stop(); err != nil {
|
||||
bcR.Logger.Error("Error stopping pool", "err", err)
|
||||
}
|
||||
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
|
||||
if ok {
|
||||
conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
|
||||
}
|
||||
|
||||
break FOR_LOOP
|
||||
|
||||
case <-trySyncTicker.C: // chan time
|
||||
select {
|
||||
case didProcessCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
case <-didProcessCh:
|
||||
// NOTE: It is a subtle mistake to process more than a single block
|
||||
// at a time (e.g. 10) here, because we only TrySend 1 request per
|
||||
// loop. The ratio mismatch can result in starving of blocks, a
|
||||
// sudden burst of requests and responses, and repeat.
|
||||
// Consequently, it is better to split these routines rather than
|
||||
// coupling them as it's written here. TODO uncouple from request
|
||||
// routine.
|
||||
|
||||
// See if there are any blocks to sync.
|
||||
first, second := bcR.pool.PeekTwoBlocks()
|
||||
// bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
|
||||
if first == nil || second == nil {
|
||||
// We need both to sync the first block.
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
// Try again quickly next loop.
|
||||
didProcessCh <- struct{}{}
|
||||
}
|
||||
|
||||
var (
|
||||
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstPartSetHeader = firstParts.Header()
|
||||
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
)
|
||||
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("invalid last commit: %w", err)
|
||||
bcR.Logger.Error(err.Error(),
|
||||
"last_commit", second.LastCommit, "block_id", firstID, "height", first.Height)
|
||||
|
||||
peerID := bcR.pool.RedoRequest(first.Height)
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
if peer != nil {
|
||||
// NOTE: we've already removed the peer's request, but we still need
|
||||
// to clean up the rest.
|
||||
bcR.Switch.StopPeerForError(peer, err)
|
||||
}
|
||||
|
||||
peerID2 := bcR.pool.RedoRequest(second.Height)
|
||||
if peerID2 != peerID {
|
||||
if peer2 := bcR.Switch.Peers().Get(peerID2); peer2 != nil {
|
||||
bcR.Switch.StopPeerForError(peer2, err)
|
||||
}
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
bcR.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we don't persist to disk every block
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: same thing for app - but we would need a way to get the hash
|
||||
// without persisting the state.
|
||||
var err error
|
||||
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate",
|
||||
"height", bcR.pool.height, "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
continue FOR_LOOP
|
||||
|
||||
case <-bcR.Quit():
|
||||
break FOR_LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BroadcastStatusRequest broadcasts `BlockStore` base and height.
|
||||
func (bcR *BlockchainReactor) BroadcastStatusRequest() {
|
||||
bm, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert StatusRequest to proto", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
// We don't care whether the broadcast is successful or not.
|
||||
_ = bcR.Switch.Broadcast(BlockchainChannel, bm)
|
||||
}
|
||||
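The comment inside poolRoutine above notes that processing blocks inline with the request loop can starve or burst, and leaves a TODO to uncouple the two. Below is one hedged sketch of a dedicated processing goroutine; quitCh, pool, and processBlocks are assumed names for the example, not the reactor's actual fields.

// Illustrative only: a processing routine uncoupled from the request routine.
go func() {
	for {
		select {
		case <-quitCh:
			return
		case <-time.After(10 * time.Millisecond):
			first, second := pool.PeekTwoBlocks()
			if first == nil || second == nil {
				continue // need both blocks to verify the first one
			}
			processBlocks(first, second) // verify, save, and apply
		}
	}
}()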
@@ -1,328 +0,0 @@
|
||||
package v0
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool/mock"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
var config *cfg.Config
|
||||
|
||||
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
|
||||
validators := make([]types.GenesisValidator, numValidators)
|
||||
privValidators := make([]types.PrivValidator, numValidators)
|
||||
for i := 0; i < numValidators; i++ {
|
||||
val, privVal := types.RandValidator(randPower, minPower)
|
||||
validators[i] = types.GenesisValidator{
|
||||
PubKey: val.PubKey,
|
||||
Power: val.VotingPower,
|
||||
}
|
||||
privValidators[i] = privVal
|
||||
}
|
||||
sort.Sort(types.PrivValidatorsByAddress(privValidators))
|
||||
|
||||
return &types.GenesisDoc{
|
||||
GenesisTime: tmtime.Now(),
|
||||
ChainID: config.ChainID(),
|
||||
Validators: validators,
|
||||
}, privValidators
|
||||
}
|
||||
|
||||
type BlockchainReactorPair struct {
|
||||
reactor *BlockchainReactor
|
||||
app proxy.AppConns
|
||||
}
|
||||
|
||||
func newBlockchainReactor(
|
||||
logger log.Logger,
|
||||
genDoc *types.GenesisDoc,
|
||||
privVals []types.PrivValidator,
|
||||
maxBlockHeight int64) BlockchainReactorPair {
|
||||
if len(privVals) != 1 {
|
||||
panic("only support one validator")
|
||||
}
|
||||
|
||||
app := &testApp{}
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
proxyApp := proxy.NewAppConns(cc)
|
||||
err := proxyApp.Start()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error start app: %w", err))
|
||||
}
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
|
||||
}
|
||||
|
||||
// Make the BlockchainReactor itself.
|
||||
// NOTE we have to create and commit the blocks first because
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db)
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// let's add some blocks in
|
||||
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
|
||||
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
|
||||
if blockHeight > 1 {
|
||||
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
|
||||
lastBlock := blockStore.LoadBlock(blockHeight - 1)
|
||||
|
||||
vote, err := types.MakeVote(
|
||||
lastBlock.Header.Height,
|
||||
lastBlockMeta.BlockID,
|
||||
state.Validators,
|
||||
privVals[0],
|
||||
lastBlock.Header.ChainID,
|
||||
time.Now(),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
lastCommit = types.NewCommit(vote.Height, vote.Round,
|
||||
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
|
||||
}
|
||||
|
||||
thisBlock := makeBlock(blockHeight, state, lastCommit)
|
||||
|
||||
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
|
||||
|
||||
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error apply block: %w", err))
|
||||
}
|
||||
|
||||
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
|
||||
}
|
||||
|
||||
bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
|
||||
bcReactor.SetLogger(logger.With("module", "blockchain"))
|
||||
|
||||
return BlockchainReactorPair{bcReactor, proxyApp}
|
||||
}
|
||||
|
||||
func TestNoBlockResponse(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
|
||||
maxBlockHeight := int64(65)
|
||||
|
||||
reactorPairs := make([]BlockchainReactorPair, 2)
|
||||
|
||||
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
|
||||
p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
defer func() {
|
||||
for _, r := range reactorPairs {
|
||||
err := r.reactor.Stop()
|
||||
require.NoError(t, err)
|
||||
err = r.app.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
tests := []struct {
|
||||
height int64
|
||||
existent bool
|
||||
}{
|
||||
{maxBlockHeight + 2, false},
|
||||
{10, true},
|
||||
{1, true},
|
||||
{100, false},
|
||||
}
|
||||
|
||||
for {
|
||||
if reactorPairs[1].reactor.pool.IsCaughtUp() {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
assert.Equal(t, maxBlockHeight, reactorPairs[0].reactor.store.Height())
|
||||
|
||||
for _, tt := range tests {
|
||||
block := reactorPairs[1].reactor.store.LoadBlock(tt.height)
|
||||
if tt.existent {
|
||||
assert.True(t, block != nil)
|
||||
} else {
|
||||
assert.True(t, block == nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: This is too hard to test without
// an easy way to add a test peer to the switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn, but
// that seems extreme.
|
||||
func TestBadBlockStopsPeer(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
|
||||
maxBlockHeight := int64(148)
|
||||
|
||||
// Other chain needs a different validator set
|
||||
otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30)
|
||||
otherChain := newBlockchainReactor(log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight)
|
||||
|
||||
defer func() {
|
||||
err := otherChain.reactor.Stop()
|
||||
require.Error(t, err)
|
||||
err = otherChain.app.Stop()
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
|
||||
reactorPairs := make([]BlockchainReactorPair, 4)
|
||||
|
||||
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[2] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[3] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
|
||||
switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
defer func() {
|
||||
for _, r := range reactorPairs {
|
||||
err := r.reactor.Stop()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = r.app.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
caughtUp := true
|
||||
for _, r := range reactorPairs {
|
||||
if !r.reactor.pool.IsCaughtUp() {
|
||||
caughtUp = false
|
||||
}
|
||||
}
|
||||
if caughtUp {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// at this time, reactors[0-3] are all caught up to the newest height
|
||||
assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size())
|
||||
|
||||
// Mark reactorPairs[3] as an invalid peer. Fiddling with .store without a mutex is a data
|
||||
// race, but can't be easily avoided.
|
||||
reactorPairs[3].reactor.store = otherChain.reactor.store
|
||||
|
||||
lastReactorPair := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs = append(reactorPairs, lastReactorPair)
|
||||
|
||||
switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)...)
|
||||
|
||||
for i := 0; i < len(reactorPairs)-1; i++ {
|
||||
p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
for {
|
||||
if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// utility funcs
|
||||
|
||||
func makeTxs(height int64) (txs []types.Tx) {
|
||||
for i := 0; i < 10; i++ {
|
||||
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
|
||||
type testApp struct {
|
||||
abci.BaseApplication
|
||||
}
|
||||
|
||||
var _ abci.Application = (*testApp)(nil)
|
||||
|
||||
func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
|
||||
return abci.ResponseInfo{}
|
||||
}
|
||||
|
||||
func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
|
||||
return abci.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
|
||||
return abci.ResponseEndBlock{}
|
||||
}
|
||||
|
||||
func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
|
||||
return abci.ResponseDeliverTx{Events: []abci.Event{}}
|
||||
}
|
||||
|
||||
func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
|
||||
return abci.ResponseCheckTx{}
|
||||
}
|
||||
|
||||
func (app *testApp) Commit() abci.ResponseCommit {
|
||||
return abci.ResponseCommit{}
|
||||
}
|
||||
|
||||
func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
|
||||
return
|
||||
}
|
||||
@@ -46,9 +46,8 @@ func main() {
|
||||
rootCA = flag.String("rootcafile", "", "absolute path to root CA")
|
||||
prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")
|
||||
|
||||
logger = log.NewTMLogger(
|
||||
log.NewSyncWriter(os.Stdout),
|
||||
).With("module", "priv_val")
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
|
||||
With("module", "priv_val")
|
||||
)
|
||||
flag.Parse()
|
||||
|
||||
@@ -64,7 +63,11 @@ func main() {
|
||||
"rootCA", *rootCA,
|
||||
)
|
||||
|
||||
pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)
|
||||
pv, err := privval.LoadFilePV(*privValKeyPath, *privValStatePath)
|
||||
if err != nil {
|
||||
fmt.Fprint(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
opts := []grpc.ServerOption{}
|
||||
if !*insecure {
|
||||
|
||||
@@ -1,8 +1,6 @@
|
||||
package debug
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -17,7 +15,7 @@ var (
|
||||
flagProfAddr = "pprof-laddr"
|
||||
flagFrequency = "frequency"
|
||||
|
||||
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
)
|
||||
|
||||
// DebugCmd defines the root command containing subcommands that assist in
|
||||
|
||||
@@ -59,7 +59,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
|
||||
}
|
||||
}
|
||||
|
||||
rpc, err := rpchttp.New(nodeRPCAddr, "/websocket")
|
||||
rpc, err := rpchttp.New(nodeRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new http client: %w", err)
|
||||
}
|
||||
|
||||
@@ -28,7 +28,7 @@ go-routine state, and the node's WAL and config information. This aggregated dat
|
||||
is packaged into a compressed archive.
|
||||
|
||||
Example:
|
||||
$ tendermint debug 34255 /path/to/tm-debug.zip`,
|
||||
$ tendermint debug kill 34255 /path/to/tm-debug.zip`,
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: killCmdHandler,
|
||||
}
|
||||
@@ -44,7 +44,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
return errors.New("invalid output file")
|
||||
}
|
||||
|
||||
rpc, err := rpchttp.New(nodeRPCAddr, "/websocket")
|
||||
rpc, err := rpchttp.New(nodeRPCAddr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create new http client: %w", err)
|
||||
}
|
||||
@@ -79,7 +79,11 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
|
||||
logger.Info("copying node WAL...")
|
||||
if err := copyWAL(conf, tmpDir); err != nil {
|
||||
return err
|
||||
if !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("node WAL does not exist; continuing...")
|
||||
}
|
||||
|
||||
logger.Info("copying node configuration...")
|
||||
|
||||
@@ -6,7 +6,7 @@ import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
|
||||
@@ -20,7 +20,7 @@ var GenNodeKeyCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func genNodeKey(cmd *cobra.Command, args []string) error {
|
||||
nodeKey := p2p.GenNodeKey()
|
||||
nodeKey := types.GenNodeKey()
|
||||
|
||||
bz, err := tmjson.Marshal(nodeKey)
|
||||
if err != nil {
|
||||
|
||||
@@ -1,6 +1,8 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
@@ -8,18 +10,19 @@ import (
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmtime "github.com/tendermint/tendermint/libs/time"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
// InitFilesCmd initialises a fresh Tendermint Core instance.
|
||||
// InitFilesCmd initializes a fresh Tendermint Core instance.
|
||||
var InitFilesCmd = &cobra.Command{
|
||||
Use: "init",
|
||||
Short: "Initialize Tendermint",
|
||||
RunE: initFiles,
|
||||
Use: "init [full|validator|seed]",
|
||||
Short: "Initializes a Tendermint node",
|
||||
ValidArgs: []string{"full", "validator", "seed"},
|
||||
// We allow for zero args so we can throw a more informative error
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: initFiles,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -32,36 +35,47 @@ func init() {
|
||||
}
|
||||
|
||||
func initFiles(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
return errors.New("must specify a node type: tendermint init [validator|full|seed]")
|
||||
}
|
||||
config.Mode = args[0]
|
||||
return initFilesWithConfig(config)
|
||||
}
|
||||
|
||||
func initFilesWithConfig(config *cfg.Config) error {
|
||||
// private validator
|
||||
privValKeyFile := config.PrivValidatorKeyFile()
|
||||
privValStateFile := config.PrivValidatorStateFile()
|
||||
var (
|
||||
pv *privval.FilePV
|
||||
err error
|
||||
)
|
||||
if tmos.FileExists(privValKeyFile) {
|
||||
pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
|
||||
logger.Info("Found private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
if config.Mode == cfg.ModeValidator {
|
||||
// private validator
|
||||
privValKeyFile := config.PrivValidator.KeyFile()
|
||||
privValStateFile := config.PrivValidator.StateFile()
|
||||
if tmos.FileExists(privValKeyFile) {
|
||||
pv, err = privval.LoadFilePV(privValKeyFile, privValStateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("Found private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
|
||||
nodeKeyFile := config.NodeKeyFile()
|
||||
if tmos.FileExists(nodeKeyFile) {
|
||||
logger.Info("Found node key", "path", nodeKeyFile)
|
||||
} else {
|
||||
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
|
||||
if _, err := types.LoadOrGenNodeKey(nodeKeyFile); err != nil {
|
||||
return err
|
||||
}
|
||||
logger.Info("Generated node key", "path", nodeKeyFile)
|
||||
@@ -79,19 +93,26 @@ func initFilesWithConfig(config *cfg.Config) error {
|
||||
ConsensusParams: types.DefaultConsensusParams(),
|
||||
}
|
||||
if keyType == "secp256k1" {
|
||||
genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
|
||||
genDoc.ConsensusParams.Validator = types.ValidatorParams{
|
||||
PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
|
||||
}
|
||||
}
|
||||
pubKey, err := pv.GetPubKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
|
||||
defer cancel()
|
||||
|
||||
// if this is a validator we add it to genesis
|
||||
if pv != nil {
|
||||
pubKey, err := pv.GetPubKey(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
}
|
||||
genDoc.Validators = []types.GenesisValidator{{
|
||||
Address: pubKey.Address(),
|
||||
PubKey: pubKey,
|
||||
Power: 10,
|
||||
}}
|
||||
}
|
||||
genDoc.Validators = []types.GenesisValidator{{
|
||||
Address: pubKey.Address(),
|
||||
PubKey: pubKey,
|
||||
Power: 10,
|
||||
}}
|
||||
|
||||
if err := genDoc.SaveAs(genFile); err != nil {
|
||||
return err
|
||||
@@ -99,5 +120,9 @@ func initFilesWithConfig(config *cfg.Config) error {
|
||||
logger.Info("Generated genesis file", "path", genFile)
|
||||
}
|
||||
|
||||
// write config file
|
||||
cfg.WriteConfigFile(config.RootDir, config)
|
||||
logger.Info("Generated config", "mode", config.Mode)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
64 cmd/tendermint/commands/key_migrate.go (new file)
@@ -0,0 +1,64 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/scripts/keymigrate"
|
||||
)
|
||||
|
||||
func MakeKeyMigrateCommand() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "key-migrate",
|
||||
Short: "Run Database key migration",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
ctx, cancel := context.WithCancel(cmd.Context())
|
||||
defer cancel()
|
||||
|
||||
contexts := []string{
|
||||
// this is ordered to put the
|
||||
// (presumably) biggest/most important
|
||||
// subsets first.
|
||||
"blockstore",
|
||||
"state",
|
||||
"peerstore",
|
||||
"tx_index",
|
||||
"evidence",
|
||||
"light",
|
||||
}
|
||||
|
||||
for idx, dbctx := range contexts {
|
||||
logger.Info("beginning a key migration",
|
||||
"dbctx", dbctx,
|
||||
"num", idx+1,
|
||||
"total", len(contexts),
|
||||
)
|
||||
|
||||
db, err := cfg.DefaultDBProvider(&cfg.DBContext{
|
||||
ID: dbctx,
|
||||
Config: config,
|
||||
})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("constructing database handle: %w", err)
|
||||
}
|
||||
|
||||
if err = keymigrate.Migrate(ctx, db); err != nil {
|
||||
return fmt.Errorf("running migration for context %q: %w",
|
||||
dbctx, err)
|
||||
}
|
||||
}
|
||||
|
||||
logger.Info("completed database migration successfully")
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
// allow database info to be overridden via cli
|
||||
addDBFlags(cmd)
|
||||
|
||||
return cmd
|
||||
}
|
||||
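A hedged example of running the migration for a single database, mirroring the loop inside MakeKeyMigrateCommand above; the "blockstore" context ID is taken from that list, and the snippet assumes the same cfg, config, ctx, and keymigrate identifiers.

// Illustrative only: migrate one database handle.
db, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
if err != nil {
	return fmt.Errorf("constructing database handle: %w", err)
}
if err := keymigrate.Migrate(ctx, db); err != nil {
	return fmt.Errorf("running migration for context %q: %w", "blockstore", err)
}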
@@ -1,14 +1,12 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -16,7 +14,6 @@ import (
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/merkle"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmmath "github.com/tendermint/tendermint/libs/math"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
@@ -24,7 +21,6 @@ import (
|
||||
lproxy "github.com/tendermint/tendermint/light/proxy"
|
||||
lrpc "github.com/tendermint/tendermint/light/rpc"
|
||||
dbs "github.com/tendermint/tendermint/light/store/db"
|
||||
rpchttp "github.com/tendermint/tendermint/rpc/client/http"
|
||||
rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server"
|
||||
)
|
||||
|
||||
@@ -70,7 +66,8 @@ var (
|
||||
trustedHash []byte
|
||||
trustLevelStr string
|
||||
|
||||
verbose bool
|
||||
logLevel string
|
||||
logFormat string
|
||||
|
||||
primaryKey = []byte("primary")
|
||||
witnessesKey = []byte("witnesses")
|
||||
@@ -94,7 +91,8 @@ func init() {
|
||||
"trusting period that headers can be verified within. Should be significantly less than the unbonding period")
|
||||
LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
|
||||
LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
|
||||
LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
|
||||
LightCmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)")
|
||||
LightCmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)")
|
||||
LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3",
|
||||
"trust level. Must be between 1/3 and 3/3",
|
||||
)
|
||||
@@ -104,15 +102,10 @@ func init() {
|
||||
}
|
||||
|
||||
func runProxy(cmd *cobra.Command, args []string) error {
|
||||
// Initialise logger.
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
var option log.Option
|
||||
if verbose {
|
||||
option, _ = log.AllowLevel("debug")
|
||||
} else {
|
||||
option, _ = log.AllowLevel("info")
|
||||
logger, err := log.NewDefaultLogger(logFormat, logLevel, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger = log.NewFilter(logger, option)
|
||||
|
||||
chainID = args[0]
|
||||
logger.Info("Creating client...", "chainID", chainID)
|
||||
@@ -122,10 +115,12 @@ func runProxy(cmd *cobra.Command, args []string) error {
|
||||
witnessesAddrs = strings.Split(witnessAddrsJoined, ",")
|
||||
}
|
||||
|
||||
db, err := dbm.NewGoLevelDB("light-client-db", dir)
|
||||
lightDB, err := dbm.NewGoLevelDB("light-client-db", dir)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create a db: %w", err)
|
||||
}
|
||||
// create a prefixed db on the chainID
|
||||
db := dbm.NewPrefixDB(lightDB, []byte(chainID))
|
||||
|
||||
if primaryAddr == "" { // check to see if we can start from an existing state
|
||||
var err error
|
||||
@@ -149,25 +144,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("can't parse trust level: %w", err)
|
||||
}
|
||||
|
||||
options := []light.Option{
|
||||
light.Logger(logger),
|
||||
light.ConfirmationFunction(func(action string) bool {
|
||||
fmt.Println(action)
|
||||
scanner := bufio.NewScanner(os.Stdin)
|
||||
for {
|
||||
scanner.Scan()
|
||||
response := scanner.Text()
|
||||
switch response {
|
||||
case "y", "Y":
|
||||
return true
|
||||
case "n", "N":
|
||||
return false
|
||||
default:
|
||||
fmt.Println("please input 'Y' or 'n' and press ENTER")
|
||||
}
|
||||
}
|
||||
}),
|
||||
}
|
||||
options := []light.Option{light.Logger(logger)}
|
||||
|
||||
if sequential {
|
||||
options = append(options, light.SequentialVerification())
|
||||
@@ -175,40 +152,25 @@ func runProxy(cmd *cobra.Command, args []string) error {
|
||||
options = append(options, light.SkippingVerification(trustLevel))
|
||||
}
|
||||
|
||||
var c *light.Client
|
||||
if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation
|
||||
c, err = light.NewHTTPClient(
|
||||
context.Background(),
|
||||
chainID,
|
||||
light.TrustOptions{
|
||||
Period: trustingPeriod,
|
||||
Height: trustedHeight,
|
||||
Hash: trustedHash,
|
||||
},
|
||||
primaryAddr,
|
||||
witnessesAddrs,
|
||||
dbs.New(db, chainID),
|
||||
options...,
|
||||
)
|
||||
} else { // continue from latest state
|
||||
c, err = light.NewHTTPClientFromTrustedStore(
|
||||
chainID,
|
||||
trustingPeriod,
|
||||
primaryAddr,
|
||||
witnessesAddrs,
|
||||
dbs.New(db, chainID),
|
||||
options...,
|
||||
)
|
||||
}
|
||||
// Initiate the light client. If the trusted store already has blocks in it, this
|
||||
// will be used else we use the trusted options.
|
||||
c, err := light.NewHTTPClient(
|
||||
context.Background(),
|
||||
chainID,
|
||||
light.TrustOptions{
|
||||
Period: trustingPeriod,
|
||||
Height: trustedHeight,
|
||||
Hash: trustedHash,
|
||||
},
|
||||
primaryAddr,
|
||||
witnessesAddrs,
|
||||
dbs.New(db),
|
||||
options...,
|
||||
)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
rpcClient, err := rpchttp.New(primaryAddr, "/websocket")
|
||||
if err != nil {
|
||||
return fmt.Errorf("http client for %s: %w", primaryAddr, err)
|
||||
}
|
||||
|
||||
cfg := rpcserver.DefaultConfig()
|
||||
cfg.MaxBodyBytes = config.RPC.MaxBodyBytes
|
||||
cfg.MaxHeaderBytes = config.RPC.MaxHeaderBytes
|
||||
@@ -220,12 +182,11 @@ func runProxy(cmd *cobra.Command, args []string) error {
|
||||
cfg.WriteTimeout = config.RPC.TimeoutBroadcastTxCommit + 1*time.Second
|
||||
}
|
||||
|
||||
p := lproxy.Proxy{
|
||||
Addr: listenAddr,
|
||||
Config: cfg,
|
||||
Client: lrpc.NewClient(rpcClient, c, lrpc.KeyPathFn(defaultMerkleKeyPathFn())),
|
||||
Logger: logger,
|
||||
p, err := lproxy.NewProxy(c, listenAddr, primaryAddr, cfg, logger, lrpc.KeyPathFn(lrpc.DefaultMerkleKeyPathFn()))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
p.Listener.Close()
|
||||
@@ -264,21 +225,3 @@ func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func defaultMerkleKeyPathFn() lrpc.KeyPathFunc {
|
||||
// regexp for extracting store name from /abci_query path
|
||||
storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`)
|
||||
|
||||
return func(path string, key []byte) (merkle.KeyPath, error) {
|
||||
matches := storeNameRegexp.FindStringSubmatch(path)
|
||||
if len(matches) != 2 {
|
||||
return nil, fmt.Errorf("can't find store name in %s using %s", path, storeNameRegexp)
|
||||
}
|
||||
storeName := matches[1]
|
||||
|
||||
kp := merkle.KeyPath{}
|
||||
kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
|
||||
kp = kp.AppendKey(key, merkle.KeyEncodingURL)
|
||||
return kp, nil
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,8 +5,8 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/internal/p2p/upnp"
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
"github.com/tendermint/tendermint/p2p/upnp"
|
||||
)
|
||||
|
||||
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
|
||||
|
||||
251 cmd/tendermint/commands/reindex_event.go (new file)
@@ -0,0 +1,251 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
tmdb "github.com/tendermint/tm-db"
|
||||
|
||||
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||
tmcfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/internal/libs/progressbar"
|
||||
ctypes "github.com/tendermint/tendermint/rpc/core/types"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/state/indexer"
|
||||
"github.com/tendermint/tendermint/state/indexer/sink/kv"
|
||||
"github.com/tendermint/tendermint/state/indexer/sink/psql"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
reindexFailed = "event re-index failed: "
|
||||
)
|
||||
|
||||
// ReIndexEventCmd allows re-indexing events over a given block height interval.
|
||||
var ReIndexEventCmd = &cobra.Command{
|
||||
Use: "reindex-event",
|
||||
Short: "reindex events to the event store backends",
|
||||
Long: `
|
||||
reindex-event is an offline tool to re-index block and tx events to the event sinks.
You can run this command when the event store backend has been dropped/disconnected or when you want to replace the backend.
The default start-height is 0, meaning the tool will start re-indexing from the base block height (inclusive); and the
default end-height is 0, meaning the tool will re-index up to the latest block height (inclusive). The user can omit
either or both arguments.
|
||||
`,
|
||||
Example: `
|
||||
tendermint reindex-event
|
||||
tendermint reindex-event --start-height 2
|
||||
tendermint reindex-event --end-height 10
|
||||
tendermint reindex-event --start-height 2 --end-height 10
|
||||
`,
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
bs, ss, err := loadStateAndBlockStore(config)
|
||||
if err != nil {
|
||||
fmt.Println(reindexFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := checkValidHeight(bs); err != nil {
|
||||
fmt.Println(reindexFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
es, err := loadEventSinks(config)
|
||||
if err != nil {
|
||||
fmt.Println(reindexFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
if err = eventReIndex(cmd, es, bs, ss); err != nil {
|
||||
fmt.Println(reindexFailed, err)
|
||||
return
|
||||
}
|
||||
|
||||
fmt.Println("event re-index finished")
|
||||
},
|
||||
}
|
||||
|
||||
var (
|
||||
startHeight int64
|
||||
endHeight int64
|
||||
)
|
||||
|
||||
func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height to start re-indexing from (inclusive)")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height to finish re-indexing at (inclusive)")
}
|
||||
|
||||
func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
|
||||
// Check duplicated sinks.
|
||||
sinks := map[string]bool{}
|
||||
for _, s := range cfg.TxIndex.Indexer {
|
||||
sl := strings.ToLower(s)
|
||||
if sinks[sl] {
|
||||
return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
|
||||
}
|
||||
sinks[sl] = true
|
||||
}
|
||||
|
||||
eventSinks := []indexer.EventSink{}
|
||||
|
||||
for k := range sinks {
|
||||
switch k {
|
||||
case string(indexer.NULL):
|
||||
return nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
|
||||
case string(indexer.KV):
|
||||
store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eventSinks = append(eventSinks, kv.NewEventSink(store))
|
||||
case string(indexer.PSQL):
|
||||
conn := cfg.TxIndex.PsqlConn
|
||||
if conn == "" {
|
||||
return nil, errors.New("the psql connection settings cannot be empty")
|
||||
}
|
||||
es, _, err := psql.NewEventSink(conn, chainID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eventSinks = append(eventSinks, es)
|
||||
default:
|
||||
return nil, errors.New("unsupported event sink type")
|
||||
}
|
||||
}
|
||||
|
||||
if len(eventSinks) == 0 {
|
||||
return nil, errors.New("no proper event sink can do event re-indexing," +
|
||||
" please check the tx-index section in the config.toml")
|
||||
}
|
||||
|
||||
if !indexer.IndexingEnabled(eventSinks) {
|
||||
return nil, fmt.Errorf("no event sink has been enabled")
|
||||
}
|
||||
|
||||
return eventSinks, nil
|
||||
}
|
||||
|
||||
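As a rough sketch of how loadEventSinks is driven by the tx-index settings (the sink names and connection string below are hypothetical, echoing the test cases later in this diff):

// Hypothetical configuration; loadEventSinks rejects "null" sinks, duplicates, and an empty psql connection.
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = []string{"kv", "psql"}
cfg.TxIndex.PsqlConn = "postgresql://user:pass@localhost/tendermint" // made-up DSN, required for "psql"
sinks, err := loadEventSinks(cfg)
if err != nil {
	// e.g. duplicated sinks, a "null" sink, or a bad psql connection string
}
_ = sinks // one indexer.EventSink per configured backend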
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
|
||||
dbType := tmdb.BackendType(cfg.DBBackend)
|
||||
|
||||
// Get BlockStore
|
||||
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
blockStore := store.NewBlockStore(blockStoreDB)
|
||||
|
||||
// Get StateStore
|
||||
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
stateStore := state.NewStore(stateDB)
|
||||
|
||||
return blockStore, stateStore, nil
|
||||
}
|
||||
|
||||
func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error {
|
||||
|
||||
var bar progressbar.Bar
|
||||
bar.NewOption(startHeight-1, endHeight)
|
||||
|
||||
fmt.Println("start re-indexing events:")
|
||||
defer bar.Finish()
|
||||
for i := startHeight; i <= endHeight; i++ {
|
||||
select {
|
||||
case <-cmd.Context().Done():
|
||||
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
|
||||
default:
|
||||
b := bs.LoadBlock(i)
|
||||
if b == nil {
|
||||
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
|
||||
}
|
||||
|
||||
r, err := ss.LoadABCIResponses(i)
|
||||
if err != nil {
|
||||
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
|
||||
}
|
||||
|
||||
e := types.EventDataNewBlockHeader{
|
||||
Header: b.Header,
|
||||
NumTxs: int64(len(b.Txs)),
|
||||
ResultBeginBlock: *r.BeginBlock,
|
||||
ResultEndBlock: *r.EndBlock,
|
||||
}
|
||||
|
||||
var batch *indexer.Batch
|
||||
if e.NumTxs > 0 {
|
||||
batch = indexer.NewBatch(e.NumTxs)
|
||||
|
||||
for i, tx := range b.Data.Txs {
|
||||
tr := abcitypes.TxResult{
|
||||
Height: b.Height,
|
||||
Index: uint32(i),
|
||||
Tx: tx,
|
||||
Result: *(r.DeliverTxs[i]),
|
||||
}
|
||||
|
||||
_ = batch.Add(&tr)
|
||||
}
|
||||
}
|
||||
|
||||
for _, sink := range es {
|
||||
if err := sink.IndexBlockEvents(e); err != nil {
|
||||
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
|
||||
}
|
||||
|
||||
if batch != nil {
|
||||
if err := sink.IndexTxEvents(batch.Ops); err != nil {
|
||||
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bar.Play(i)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkValidHeight(bs state.BlockStore) error {
|
||||
base := bs.Base()
|
||||
|
||||
if startHeight == 0 {
|
||||
startHeight = base
|
||||
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
|
||||
}
|
||||
|
||||
if startHeight < base {
|
||||
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
|
||||
}
|
||||
|
||||
height := bs.Height()
|
||||
|
||||
if startHeight > height {
|
||||
return fmt.Errorf(
|
||||
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
|
||||
}
|
||||
|
||||
if endHeight == 0 || endHeight > height {
|
||||
endHeight = height
|
||||
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
|
||||
}
|
||||
|
||||
if endHeight < base {
|
||||
return fmt.Errorf(
|
||||
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
|
||||
}
|
||||
|
||||
if endHeight < startHeight {
|
||||
return fmt.Errorf(
|
||||
"%s (requested the end height: %d is less than the start height: %d)",
|
||||
ctypes.ErrInvalidRequest, startHeight, endHeight)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
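For a concrete sense of the clamping rules in checkValidHeight above (illustrative numbers, matching the base/height constants used in the tests that follow):

// base = 2, height = 10 in the blockstore:
//   start-height 0  -> set to 2 (the base); end-height 0 -> set to 10 (the latest height)
//   start-height 1  -> error, below the base
//   end-height  20  -> set down to 10
//   end-height   1  -> error, below the base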
cmd/tendermint/commands/reindex_event_test.go (new file, 171 lines)
@@ -0,0 +1,171 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abcitypes "github.com/tendermint/tendermint/abci/types"
|
||||
tmcfg "github.com/tendermint/tendermint/config"
|
||||
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
|
||||
"github.com/tendermint/tendermint/state/indexer"
|
||||
"github.com/tendermint/tendermint/state/mocks"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
height int64 = 10
|
||||
base int64 = 2
|
||||
)
|
||||
|
||||
func setupReIndexEventCmd() *cobra.Command {
|
||||
reIndexEventCmd := &cobra.Command{
|
||||
Use: ReIndexEventCmd.Use,
|
||||
Run: func(cmd *cobra.Command, args []string) {},
|
||||
}
|
||||
|
||||
_ = reIndexEventCmd.ExecuteContext(context.Background())
|
||||
|
||||
return reIndexEventCmd
|
||||
}
|
||||
|
||||
func TestReIndexEventCheckHeight(t *testing.T) {
|
||||
mockBlockStore := &mocks.BlockStore{}
|
||||
mockBlockStore.
|
||||
On("Base").Return(base).
|
||||
On("Height").Return(height)
|
||||
|
||||
testCases := []struct {
|
||||
startHeight int64
|
||||
endHeight int64
|
||||
validHeight bool
|
||||
}{
|
||||
{0, 0, true},
|
||||
{0, base, true},
|
||||
{0, base - 1, false},
|
||||
{0, height, true},
|
||||
{0, height + 1, true},
|
||||
{0, 0, true},
|
||||
{base - 1, 0, false},
|
||||
{base, 0, true},
|
||||
{base, base, true},
|
||||
{base, base - 1, false},
|
||||
{base, height, true},
|
||||
{base, height + 1, true},
|
||||
{height, 0, true},
|
||||
{height, base, false},
|
||||
{height, height - 1, false},
|
||||
{height, height, true},
|
||||
{height, height + 1, true},
|
||||
{height + 1, 0, false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
startHeight = tc.startHeight
|
||||
endHeight = tc.endHeight
|
||||
|
||||
err := checkValidHeight(mockBlockStore)
|
||||
if tc.validHeight {
|
||||
require.NoError(t, err)
|
||||
} else {
|
||||
require.Error(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadEventSink(t *testing.T) {
|
||||
testCases := []struct {
|
||||
sinks []string
|
||||
connURL string
|
||||
loadErr bool
|
||||
}{
|
||||
{[]string{}, "", true},
|
||||
{[]string{"NULL"}, "", true},
|
||||
{[]string{"KV"}, "", false},
|
||||
{[]string{"KV", "KV"}, "", true},
|
||||
{[]string{"PSQL"}, "", true}, // true because empty connect url
|
||||
{[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url
|
||||
// skip to test PSQL connect with correct url
|
||||
{[]string{"UnsupportedSinkType"}, "wrongUrl", true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
cfg := tmcfg.TestConfig()
|
||||
cfg.TxIndex.Indexer = tc.sinks
|
||||
cfg.TxIndex.PsqlConn = tc.connURL
|
||||
_, err := loadEventSinks(cfg)
|
||||
if tc.loadErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLoadBlockStore(t *testing.T) {
|
||||
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, bs)
|
||||
require.NotNil(t, ss)
|
||||
|
||||
}
|
||||
func TestReIndexEvent(t *testing.T) {
|
||||
mockBlockStore := &mocks.BlockStore{}
|
||||
mockStateStore := &mocks.Store{}
|
||||
mockEventSink := &mocks.EventSink{}
|
||||
|
||||
mockBlockStore.
|
||||
On("Base").Return(base).
|
||||
On("Height").Return(height).
|
||||
On("LoadBlock", base).Return(nil).Once().
|
||||
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
|
||||
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
|
||||
|
||||
mockEventSink.
|
||||
On("Type").Return(indexer.KV).
|
||||
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
|
||||
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
|
||||
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
|
||||
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
|
||||
|
||||
dtx := abcitypes.ResponseDeliverTx{}
|
||||
abciResp := &prototmstate.ABCIResponses{
|
||||
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
|
||||
EndBlock: &abcitypes.ResponseEndBlock{},
|
||||
BeginBlock: &abcitypes.ResponseBeginBlock{},
|
||||
}
|
||||
|
||||
mockStateStore.
|
||||
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
|
||||
On("LoadABCIResponses", base).Return(abciResp, nil).
|
||||
On("LoadABCIResponses", height).Return(abciResp, nil)
|
||||
|
||||
testCases := []struct {
|
||||
startHeight int64
|
||||
endHeight int64
|
||||
reIndexErr bool
|
||||
}{
|
||||
{base, height, true}, // LoadBlock error
|
||||
{base, height, true}, // LoadABCIResponses error
|
||||
{base, height, true}, // index block event error
|
||||
{base, height, true}, // index tx event error
|
||||
{base, base, false},
|
||||
{height, height, false},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
startHeight = tc.startHeight
|
||||
endHeight = tc.endHeight
|
||||
|
||||
err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
|
||||
if tc.reIndexErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -2,8 +2,7 @@ package commands
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/consensus"
|
||||
"github.com/tendermint/tendermint/internal/consensus"
|
||||
)
|
||||
|
||||
// ReplayCmd allows replaying of messages from the WAL.
|
||||
|
||||
@@ -17,7 +17,7 @@ var ResetAllCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-all",
|
||||
Aliases: []string{"unsafe_reset_all"},
|
||||
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
|
||||
Run: resetAll,
|
||||
RunE: resetAll,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
@@ -34,26 +34,26 @@ var ResetPrivValidatorCmd = &cobra.Command{
|
||||
Use: "unsafe-reset-priv-validator",
|
||||
Aliases: []string{"unsafe_reset_priv_validator"},
|
||||
Short: "(unsafe) Reset this node's validator to genesis state",
|
||||
Run: resetPrivValidator,
|
||||
RunE: resetPrivValidator,
|
||||
PreRun: deprecateSnakeCase,
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetAll(cmd *cobra.Command, args []string) {
|
||||
ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
|
||||
config.PrivValidatorStateFile(), logger)
|
||||
func resetAll(cmd *cobra.Command, args []string) error {
|
||||
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
|
||||
config.PrivValidator.StateFile(), logger)
|
||||
}
|
||||
|
||||
// XXX: this is totally unsafe.
|
||||
// it's only suitable for testnets.
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) {
|
||||
resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
|
||||
func resetPrivValidator(cmd *cobra.Command, args []string) error {
|
||||
return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger)
|
||||
}
|
||||
|
||||
// ResetAll removes address book files plus all data, and resets the privValidator data.
|
||||
// Exported so other CLI tools can use it.
|
||||
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error {
|
||||
if keepAddrBook {
|
||||
logger.Info("The address book remains intact")
|
||||
} else {
|
||||
@@ -68,24 +68,28 @@ func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logg
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
logger.Error("unable to recreate dbDir", "err", err)
|
||||
}
|
||||
resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
return resetFilePV(privValKeyFile, privValStateFile, logger)
|
||||
}
|
||||
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
|
||||
func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) error {
|
||||
if _, err := os.Stat(privValKeyFile); err == nil {
|
||||
pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
pv, err := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pv.Reset()
|
||||
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
return err
|
||||
}
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func removeAddrBook(addrBookFile string, logger log.Logger) {
|
||||
|
||||
@@ -2,21 +2,20 @@ package commands
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/cli"
|
||||
tmflags "github.com/tendermint/tendermint/libs/cli/flags"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var (
|
||||
config = cfg.DefaultConfig()
|
||||
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
config = cfg.DefaultConfig()
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
ctxTimeout = 4 * time.Second
|
||||
)
|
||||
|
||||
func init() {
|
||||
@@ -51,20 +50,17 @@ var RootCmd = &cobra.Command{
|
||||
if cmd.Name() == VersionCmd.Name() {
|
||||
return nil
|
||||
}
|
||||
|
||||
config, err = ParseConfig()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if config.LogFormat == cfg.LogFormatJSON {
|
||||
logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
|
||||
}
|
||||
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
|
||||
|
||||
logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if viper.GetBool(cli.TraceFlag) {
|
||||
logger = log.NewTracingLogger(logger)
|
||||
}
|
||||
|
||||
logger = logger.With("module", "main")
|
||||
return nil
|
||||
},
|
||||
|
||||
@@ -18,10 +18,6 @@ import (
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
)
|
||||
|
||||
var (
|
||||
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
|
||||
)
|
||||
|
||||
// clearConfig clears env vars, the given root dir, and resets viper.
|
||||
func clearConfig(dir string) {
|
||||
if err := os.Unsetenv("TMHOME"); err != nil {
|
||||
@@ -52,10 +48,10 @@ func testRootCmd() *cobra.Command {
|
||||
}
|
||||
|
||||
func testSetup(rootDir string, args []string, env map[string]string) error {
|
||||
clearConfig(defaultRoot)
|
||||
clearConfig(rootDir)
|
||||
|
||||
rootCmd := testRootCmd()
|
||||
cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)
|
||||
cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir)
|
||||
|
||||
// run with the args and env
|
||||
args = append([]string{rootCmd.Use}, args...)
|
||||
@@ -63,6 +59,7 @@ func testSetup(rootDir string, args []string, env map[string]string) error {
|
||||
}
|
||||
|
||||
func TestRootHome(t *testing.T) {
|
||||
defaultRoot := t.TempDir()
|
||||
newRoot := filepath.Join(defaultRoot, "something-else")
|
||||
cases := []struct {
|
||||
args []string
|
||||
@@ -105,6 +102,7 @@ func TestRootFlagsEnv(t *testing.T) {
|
||||
{nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env
|
||||
}
|
||||
|
||||
defaultRoot := t.TempDir()
|
||||
for i, tc := range cases {
|
||||
idxString := strconv.Itoa(i)
|
||||
|
||||
@@ -118,7 +116,7 @@ func TestRootFlagsEnv(t *testing.T) {
|
||||
func TestRootConfig(t *testing.T) {
|
||||
|
||||
// write non-default config
|
||||
nonDefaultLogLvl := "abc:debug"
|
||||
nonDefaultLogLvl := "debug"
|
||||
cvals := map[string]string{
|
||||
"log-level": nonDefaultLogLvl,
|
||||
}
|
||||
@@ -129,12 +127,13 @@ func TestRootConfig(t *testing.T) {
|
||||
|
||||
logLvl string
|
||||
}{
|
||||
{nil, nil, nonDefaultLogLvl}, // should load config
|
||||
{[]string{"--log-level=abc:info"}, nil, "abc:info"}, // flag over rides
|
||||
{nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides
|
||||
{nil, nil, nonDefaultLogLvl}, // should load config
|
||||
{[]string{"--log-level=info"}, nil, "info"}, // flag over rides
|
||||
{nil, map[string]string{"TM_LOG_LEVEL": "info"}, "info"}, // env over rides
|
||||
}
|
||||
|
||||
for i, tc := range cases {
|
||||
defaultRoot := t.TempDir()
|
||||
idxString := strconv.Itoa(i)
|
||||
clearConfig(defaultRoot)
|
||||
|
||||
|
||||
@@ -11,7 +11,6 @@ import (
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
nm "github.com/tendermint/tendermint/node"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -24,10 +23,13 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
// bind flags
|
||||
cmd.Flags().String("moniker", config.Moniker, "node name")
|
||||
|
||||
// mode flags
|
||||
cmd.Flags().String("mode", config.Mode, "node mode (full | validator | seed)")
|
||||
|
||||
// priv val flags
|
||||
cmd.Flags().String(
|
||||
"priv-validator-laddr",
|
||||
config.PrivValidatorListenAddr,
|
||||
config.PrivValidator.ListenAddr,
|
||||
"socket address to listen on for connections from external priv-validator process")
|
||||
|
||||
// node flags
|
||||
@@ -46,9 +48,7 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
"proxy-app",
|
||||
config.ProxyApp,
|
||||
"proxy app address, or one of: 'kvstore',"+
|
||||
" 'persistent_kvstore',"+
|
||||
" 'counter',"+
|
||||
" 'counter_serial' or 'noop' for local testing.")
|
||||
" 'persistent_kvstore' or 'noop' for local testing.")
|
||||
cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")
|
||||
|
||||
// rpc flags
|
||||
@@ -71,7 +71,6 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers")
|
||||
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
|
||||
cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange")
|
||||
cmd.Flags().Bool("p2p.seed-mode", config.P2P.SeedMode, "enable/disable seed mode")
|
||||
cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs")
|
||||
|
||||
// consensus flags
|
||||
@@ -84,7 +83,10 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
config.Consensus.CreateEmptyBlocksInterval.String(),
|
||||
"the possible interval between empty blocks")
|
||||
|
||||
// db flags
|
||||
addDBFlags(cmd)
|
||||
}
|
||||
|
||||
func addDBFlags(cmd *cobra.Command) {
|
||||
cmd.Flags().String(
|
||||
"db-backend",
|
||||
config.DBBackend,
|
||||
@@ -97,7 +99,7 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
|
||||
// NewRunNodeCmd returns the command that allows the CLI to start a node.
|
||||
// It can be used with a custom PrivValidator and in-process ABCI application.
|
||||
func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
|
||||
func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "start",
|
||||
Aliases: []string{"node", "run"},
|
||||
@@ -116,7 +118,7 @@ func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
|
||||
return fmt.Errorf("failed to start node: %w", err)
|
||||
}
|
||||
|
||||
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
|
||||
logger.Info("started node", "node", n.String())
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
|
||||
@@ -4,8 +4,6 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// ShowNodeIDCmd dumps node's ID to the standard output.
|
||||
@@ -18,11 +16,11 @@ var ShowNodeIDCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func showNodeID(cmd *cobra.Command, args []string) error {
|
||||
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
|
||||
nodeKeyID, err := config.LoadNodeKeyID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
fmt.Println(nodeKey.ID)
|
||||
fmt.Println(nodeKeyID)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1,13 +1,17 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmgrpc "github.com/tendermint/tendermint/privval/grpc"
|
||||
)
|
||||
|
||||
// ShowValidatorCmd adds capabilities for showing the validator info.
|
||||
@@ -20,16 +24,51 @@ var ShowValidatorCmd = &cobra.Command{
|
||||
}
|
||||
|
||||
func showValidator(cmd *cobra.Command, args []string) error {
|
||||
keyFilePath := config.PrivValidatorKeyFile()
|
||||
if !tmos.FileExists(keyFilePath) {
|
||||
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
|
||||
}
|
||||
var (
|
||||
pubKey crypto.PubKey
|
||||
err error
|
||||
)
|
||||
|
||||
pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
|
||||
//TODO: remove once gRPC is the only supported protocol
|
||||
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr)
|
||||
switch protocol {
|
||||
case "grpc":
|
||||
pvsc, err := tmgrpc.DialRemoteSigner(
|
||||
config.PrivValidator,
|
||||
config.ChainID(),
|
||||
logger,
|
||||
config.Instrumentation.Prometheus,
|
||||
)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't connect to remote validator %w", err)
|
||||
}
|
||||
|
||||
pubKey, err := pv.GetPubKey()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
|
||||
defer cancel()
|
||||
|
||||
pubKey, err = pvsc.GetPubKey(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
}
|
||||
default:
|
||||
|
||||
keyFilePath := config.PrivValidator.KeyFile()
|
||||
if !tmos.FileExists(keyFilePath) {
|
||||
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
|
||||
}
|
||||
|
||||
pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidator.StateFile())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
|
||||
defer cancel()
|
||||
|
||||
pubKey, err = pv.GetPubKey(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
bz, err := tmjson.Marshal(pubKey)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
@@ -13,11 +14,9 @@ import (
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/bytes"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmtime "github.com/tendermint/tendermint/libs/time"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -105,7 +104,8 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
)
|
||||
}
|
||||
|
||||
config := cfg.DefaultConfig()
|
||||
// set mode to validator for testnet
|
||||
config := cfg.DefaultValidatorConfig()
|
||||
|
||||
// overwrite default config if set and valid
|
||||
if configFile != "" {
|
||||
@@ -143,11 +143,17 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey)
|
||||
pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState)
|
||||
pv := privval.LoadFilePV(pvKeyFile, pvStateFile)
|
||||
pvKeyFile := filepath.Join(nodeDir, config.PrivValidator.Key)
|
||||
pvStateFile := filepath.Join(nodeDir, config.PrivValidator.State)
|
||||
pv, err := privval.LoadFilePV(pvKeyFile, pvStateFile)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
pubKey, err := pv.GetPubKey()
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
|
||||
defer cancel()
|
||||
|
||||
pubKey, err := pv.GetPubKey(ctx)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't get pubkey: %w", err)
|
||||
}
|
||||
@@ -189,7 +195,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
ConsensusParams: types.DefaultConsensusParams(),
|
||||
}
|
||||
if keyType == "secp256k1" {
|
||||
genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
|
||||
genDoc.ConsensusParams.Validator = types.ValidatorParams{
|
||||
PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
|
||||
}
|
||||
}
|
||||
@@ -205,11 +211,11 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Gather persistent peer addresses.
|
||||
var (
|
||||
persistentPeers string
|
||||
persistentPeers = make([]string, 0)
|
||||
err error
|
||||
)
|
||||
if populatePersistentPeers {
|
||||
persistentPeers, err = persistentPeersString(config)
|
||||
persistentPeers, err = persistentPeersArray(config)
|
||||
if err != nil {
|
||||
_ = os.RemoveAll(outputDir)
|
||||
return err
|
||||
@@ -223,11 +229,18 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
|
||||
config.P2P.AddrBookStrict = false
|
||||
config.P2P.AllowDuplicateIP = true
|
||||
if populatePersistentPeers {
|
||||
config.P2P.PersistentPeers = persistentPeers
|
||||
persistentPeersWithoutSelf := make([]string, 0)
|
||||
for j := 0; j < len(persistentPeers); j++ {
|
||||
if j == i {
|
||||
continue
|
||||
}
|
||||
persistentPeersWithoutSelf = append(persistentPeersWithoutSelf, persistentPeers[j])
|
||||
}
|
||||
config.P2P.PersistentPeers = strings.Join(persistentPeersWithoutSelf, ",")
|
||||
}
|
||||
config.Moniker = moniker(i)
|
||||
|
||||
cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
|
||||
cfg.WriteConfigFile(nodeDir, config)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
|
||||
@@ -254,18 +267,19 @@ func hostnameOrIP(i int) string {
|
||||
return ip.String()
|
||||
}
|
||||
|
||||
func persistentPeersString(config *cfg.Config) (string, error) {
|
||||
persistentPeers := make([]string, nValidators+nNonValidators)
|
||||
// get an array of persistent peers
|
||||
func persistentPeersArray(config *cfg.Config) ([]string, error) {
|
||||
peers := make([]string, nValidators+nNonValidators)
|
||||
for i := 0; i < nValidators+nNonValidators; i++ {
|
||||
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
|
||||
config.SetRoot(nodeDir)
|
||||
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
|
||||
nodeKey, err := config.LoadNodeKeyID()
|
||||
if err != nil {
|
||||
return "", err
|
||||
return []string{}, err
|
||||
}
|
||||
persistentPeers[i] = p2p.IDAddressString(nodeKey.ID, fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
|
||||
peers[i] = nodeKey.AddressString(fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
|
||||
}
|
||||
return strings.Join(persistentPeers, ","), nil
|
||||
return peers, nil
|
||||
}
|
||||
|
||||
func moniker(i int) string {
|
||||
|
||||
@@ -13,6 +13,6 @@ var VersionCmd = &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Show version info",
|
||||
Run: func(cmd *cobra.Command, args []string) {
|
||||
fmt.Println(version.TMCoreSemVer)
|
||||
fmt.Println(version.TMVersion)
|
||||
},
|
||||
}
|
||||
|
||||
@@ -15,6 +15,7 @@ func main() {
|
||||
rootCmd := cmd.RootCmd
|
||||
rootCmd.AddCommand(
|
||||
cmd.GenValidatorCmd,
|
||||
cmd.ReIndexEventCmd,
|
||||
cmd.InitFilesCmd,
|
||||
cmd.ProbeUpnpCmd,
|
||||
cmd.LightCmd,
|
||||
@@ -27,6 +28,7 @@ func main() {
|
||||
cmd.ShowNodeIDCmd,
|
||||
cmd.GenNodeKeyCmd,
|
||||
cmd.VersionCmd,
|
||||
cmd.MakeKeyMigrateCommand(),
|
||||
debug.DebugCmd,
|
||||
cli.NewCompletionCmd(rootCmd, true),
|
||||
)
|
||||
@@ -38,8 +40,8 @@ func main() {
|
||||
// * Supply a genesis doc file from another source
|
||||
// * Provide their own DB implementation
|
||||
// can copy this file and use something other than the
|
||||
// DefaultNewNode function
|
||||
nodeFunc := nm.DefaultNewNode
|
||||
// node.NewDefault function
|
||||
nodeFunc := nm.NewDefault
|
||||
|
||||
// Create & start node
|
||||
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))
|
||||
|
||||
config/config.go (477 changed lines)
@@ -4,10 +4,16 @@ import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
tmjson "github.com/tendermint/tendermint/libs/json"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -16,10 +22,18 @@ const (
|
||||
// FuzzModeDelay is a mode in which we randomly sleep
|
||||
FuzzModeDelay
|
||||
|
||||
// LogFormatPlain is a format for colored text
|
||||
LogFormatPlain = "plain"
|
||||
// LogFormatJSON is a format for json output
|
||||
LogFormatJSON = "json"
|
||||
// DefaultLogLevel defines a default log level as INFO.
|
||||
DefaultLogLevel = "info"
|
||||
|
||||
ModeFull = "full"
|
||||
ModeValidator = "validator"
|
||||
ModeSeed = "seed"
|
||||
|
||||
BlockSyncV0 = "v0"
|
||||
BlockSyncV2 = "v2"
|
||||
|
||||
MempoolV0 = "v0"
|
||||
MempoolV1 = "v1"
|
||||
)
|
||||
|
||||
// NOTE: Most of the structs & relevant comments + the
|
||||
@@ -36,6 +50,7 @@ var (
|
||||
defaultConfigFileName = "config.toml"
|
||||
defaultGenesisJSONName = "genesis.json"
|
||||
|
||||
defaultMode = ModeFull
|
||||
defaultPrivValKeyName = "priv_validator_key.json"
|
||||
defaultPrivValStateName = "priv_validator_state.json"
|
||||
|
||||
@@ -61,10 +76,11 @@ type Config struct {
|
||||
P2P *P2PConfig `mapstructure:"p2p"`
|
||||
Mempool *MempoolConfig `mapstructure:"mempool"`
|
||||
StateSync *StateSyncConfig `mapstructure:"statesync"`
|
||||
FastSync *FastSyncConfig `mapstructure:"fastsync"`
|
||||
BlockSync *BlockSyncConfig `mapstructure:"fastsync"`
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
|
||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||
PrivValidator *PrivValidatorConfig `mapstructure:"priv-validator"`
|
||||
}
|
||||
|
||||
// DefaultConfig returns a default configuration for a Tendermint node
|
||||
@@ -75,13 +91,21 @@ func DefaultConfig() *Config {
|
||||
P2P: DefaultP2PConfig(),
|
||||
Mempool: DefaultMempoolConfig(),
|
||||
StateSync: DefaultStateSyncConfig(),
|
||||
FastSync: DefaultFastSyncConfig(),
|
||||
BlockSync: DefaultBlockSyncConfig(),
|
||||
Consensus: DefaultConsensusConfig(),
|
||||
TxIndex: DefaultTxIndexConfig(),
|
||||
Instrumentation: DefaultInstrumentationConfig(),
|
||||
PrivValidator: DefaultPrivValidatorConfig(),
|
||||
}
|
||||
}
|
||||
|
||||
// DefaultValidatorConfig returns default config with mode as validator
|
||||
func DefaultValidatorConfig() *Config {
|
||||
cfg := DefaultConfig()
|
||||
cfg.Mode = ModeValidator
|
||||
return cfg
|
||||
}
|
||||
|
||||
// TestConfig returns a configuration that can be used for testing
|
||||
func TestConfig() *Config {
|
||||
return &Config{
|
||||
@@ -90,10 +114,11 @@ func TestConfig() *Config {
|
||||
P2P: TestP2PConfig(),
|
||||
Mempool: TestMempoolConfig(),
|
||||
StateSync: TestStateSyncConfig(),
|
||||
FastSync: TestFastSyncConfig(),
|
||||
BlockSync: TestBlockSyncConfig(),
|
||||
Consensus: TestConsensusConfig(),
|
||||
TxIndex: TestTxIndexConfig(),
|
||||
Instrumentation: TestInstrumentationConfig(),
|
||||
PrivValidator: DefaultPrivValidatorConfig(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -104,6 +129,7 @@ func (cfg *Config) SetRoot(root string) *Config {
|
||||
cfg.P2P.RootDir = root
|
||||
cfg.Mempool.RootDir = root
|
||||
cfg.Consensus.RootDir = root
|
||||
cfg.PrivValidator.RootDir = root
|
||||
return cfg
|
||||
}
|
||||
|
||||
@@ -125,7 +151,7 @@ func (cfg *Config) ValidateBasic() error {
|
||||
if err := cfg.StateSync.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [statesync] section: %w", err)
|
||||
}
|
||||
if err := cfg.FastSync.ValidateBasic(); err != nil {
|
||||
if err := cfg.BlockSync.ValidateBasic(); err != nil {
|
||||
return fmt.Errorf("error in [fastsync] section: %w", err)
|
||||
}
|
||||
if err := cfg.Consensus.ValidateBasic(); err != nil {
|
||||
@@ -156,9 +182,22 @@ type BaseConfig struct { //nolint: maligned
|
||||
// A custom human readable name for this node
|
||||
Moniker string `mapstructure:"moniker"`
|
||||
|
||||
// Mode of Node: full | validator | seed
|
||||
// * validator
|
||||
// - all reactors
|
||||
// - with priv_validator_key.json, priv_validator_state.json
|
||||
// * full
|
||||
// - all reactors
|
||||
// - No priv_validator_key.json, priv_validator_state.json
|
||||
// * seed
|
||||
// - only P2P, PEX Reactor
|
||||
// - No priv_validator_key.json, priv_validator_state.json
|
||||
Mode string `mapstructure:"mode"`
|
||||
|
||||
// If this node is many blocks behind the tip of the chain, FastSync
|
||||
// allows them to catchup quickly by downloading blocks in parallel
|
||||
// and verifying their commits
|
||||
// TODO: This should be moved to the blocksync config
|
||||
FastSyncMode bool `mapstructure:"fast-sync"`
|
||||
|
||||
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
|
||||
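A brief sketch of how the new mode field documented in the hunk above is meant to be chosen (illustrative; assumes an external caller importing the config package and the constructors added in this diff):

cfg := config.DefaultConfig()           // Mode == ModeFull: all reactors, no validator key files required
vcfg := config.DefaultValidatorConfig() // Mode == ModeValidator: uses priv_validator_key.json / priv_validator_state.json
cfg.Mode = config.ModeSeed              // seed mode: only the P2P/PEX reactors
_ = vcfg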
@@ -194,26 +233,6 @@ type BaseConfig struct { //nolint: maligned
|
||||
// Path to the JSON file containing the initial validator set and other meta data
|
||||
Genesis string `mapstructure:"genesis-file"`
|
||||
|
||||
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
PrivValidatorKey string `mapstructure:"priv-validator-key-file"`
|
||||
|
||||
// Path to the JSON file containing the last sign state of a validator
|
||||
PrivValidatorState string `mapstructure:"priv-validator-state-file"`
|
||||
|
||||
// TCP or UNIX socket address for Tendermint to listen on for
|
||||
// connections from an external PrivValidator process
|
||||
PrivValidatorListenAddr string `mapstructure:"priv-validator-laddr"`
|
||||
|
||||
// Client certificate generated while creating needed files for secure connection.
|
||||
// If a remote validator address is provided but no certificate, the connection will be insecure
|
||||
PrivValidatorClientCertificate string `mapstructure:"priv-validator-client-certificate-file"`
|
||||
|
||||
// Client key generated while creating certificates for secure connection
|
||||
PrivValidatorClientKey string `mapstructure:"priv-validator-client-key-file"`
|
||||
|
||||
// Path Root Certificate Authority used to sign both client and server certificates
|
||||
PrivValidatorRootCA string `mapstructure:"priv-validator-root-ca-file"`
|
||||
|
||||
// A JSON file containing the private key to use for p2p authenticated encryption
|
||||
NodeKey string `mapstructure:"node-key-file"`
|
||||
|
||||
@@ -228,19 +247,18 @@ type BaseConfig struct { //nolint: maligned
|
||||
// DefaultBaseConfig returns a default base configuration for a Tendermint node
|
||||
func DefaultBaseConfig() BaseConfig {
|
||||
return BaseConfig{
|
||||
Genesis: defaultGenesisJSONPath,
|
||||
PrivValidatorKey: defaultPrivValKeyPath,
|
||||
PrivValidatorState: defaultPrivValStatePath,
|
||||
NodeKey: defaultNodeKeyPath,
|
||||
Moniker: defaultMoniker,
|
||||
ProxyApp: "tcp://127.0.0.1:26658",
|
||||
ABCI: "socket",
|
||||
LogLevel: DefaultPackageLogLevels(),
|
||||
LogFormat: LogFormatPlain,
|
||||
FastSyncMode: true,
|
||||
FilterPeers: false,
|
||||
DBBackend: "goleveldb",
|
||||
DBPath: "data",
|
||||
Genesis: defaultGenesisJSONPath,
|
||||
NodeKey: defaultNodeKeyPath,
|
||||
Mode: defaultMode,
|
||||
Moniker: defaultMoniker,
|
||||
ProxyApp: "tcp://127.0.0.1:26658",
|
||||
ABCI: "socket",
|
||||
LogLevel: DefaultLogLevel,
|
||||
LogFormat: log.LogFormatPlain,
|
||||
FastSyncMode: true,
|
||||
FilterPeers: false,
|
||||
DBBackend: "goleveldb",
|
||||
DBPath: "data",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -248,6 +266,7 @@ func DefaultBaseConfig() BaseConfig {
|
||||
func TestBaseConfig() BaseConfig {
|
||||
cfg := DefaultBaseConfig()
|
||||
cfg.chainID = "tendermint_test"
|
||||
cfg.Mode = ModeValidator
|
||||
cfg.ProxyApp = "kvstore"
|
||||
cfg.FastSyncMode = false
|
||||
cfg.DBBackend = "memdb"
|
||||
@@ -263,74 +282,145 @@ func (cfg BaseConfig) GenesisFile() string {
|
||||
return rootify(cfg.Genesis, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorClientKeyFile returns the full path to the priv_validator_key.json file
|
||||
func (cfg BaseConfig) PrivValidatorClientKeyFile() string {
|
||||
return rootify(cfg.PrivValidatorClientKey, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorClientCertificateFile returns the full path to the priv_validator_key.json file
|
||||
func (cfg BaseConfig) PrivValidatorClientCertificateFile() string {
|
||||
return rootify(cfg.PrivValidatorClientCertificate, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorCertificateAuthorityFile returns the full path to the priv_validator_key.json file
|
||||
func (cfg BaseConfig) PrivValidatorRootCAFile() string {
|
||||
return rootify(cfg.PrivValidatorRootCA, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file
|
||||
func (cfg BaseConfig) PrivValidatorKeyFile() string {
|
||||
return rootify(cfg.PrivValidatorKey, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorFile returns the full path to the priv_validator_state.json file
|
||||
func (cfg BaseConfig) PrivValidatorStateFile() string {
|
||||
return rootify(cfg.PrivValidatorState, cfg.RootDir)
|
||||
}
|
||||
|
||||
// NodeKeyFile returns the full path to the node_key.json file
|
||||
func (cfg BaseConfig) NodeKeyFile() string {
|
||||
return rootify(cfg.NodeKey, cfg.RootDir)
|
||||
}
|
||||
|
||||
// LoadNodeKeyID loads the NodeKey from the configured node key file and returns its ID.
|
||||
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) {
|
||||
jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile())
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
nodeKey := types.NodeKey{}
|
||||
err = tmjson.Unmarshal(jsonBytes, &nodeKey)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
nodeKey.ID = types.NodeIDFromPubKey(nodeKey.PubKey())
|
||||
return nodeKey.ID, nil
|
||||
}
|
||||
|
||||
// LoadOrGenNodeKeyID attempts to load the NodeKey from the configured node key file and returns its ID. If
// the file does not exist, it generates and saves a new NodeKey and returns its ID.
|
||||
func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) {
|
||||
if tmos.FileExists(cfg.NodeKeyFile()) {
|
||||
nodeKey, err := cfg.LoadNodeKeyID()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return nodeKey, nil
|
||||
}
|
||||
|
||||
nodeKey := types.GenNodeKey()
|
||||
|
||||
if err := nodeKey.SaveAs(cfg.NodeKeyFile()); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return nodeKey.ID, nil
|
||||
}
|
||||
|
||||
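A minimal sketch of the intended call pattern for the node-key helpers above (cfg stands for a BaseConfig value; error handling abbreviated):

// Loads node_key.json if it exists, otherwise generates and saves a new key, returning its ID.
id, err := cfg.LoadOrGenNodeKeyID()
if err != nil {
	// filesystem or decoding error
}
fmt.Println(id) // a types.NodeID derived from the node key's public key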
// DBDir returns the full path to the database directory
|
||||
func (cfg BaseConfig) DBDir() string {
|
||||
return rootify(cfg.DBPath, cfg.RootDir)
|
||||
}
|
||||
|
||||
func (cfg *BaseConfig) ArePrivValidatorClientSecurityOptionsPresent() bool {
|
||||
switch {
|
||||
case cfg.PrivValidatorRootCA == "":
|
||||
return false
|
||||
case cfg.PrivValidatorClientKey == "":
|
||||
return false
|
||||
case cfg.PrivValidatorClientCertificate == "":
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation (checking param bounds, etc.) and
|
||||
// returns an error if any check fails.
|
||||
func (cfg BaseConfig) ValidateBasic() error {
|
||||
switch cfg.LogFormat {
|
||||
case LogFormatPlain, LogFormatJSON:
|
||||
case log.LogFormatJSON, log.LogFormatText, log.LogFormatPlain:
|
||||
default:
|
||||
return errors.New("unknown log format (must be 'plain' or 'json')")
|
||||
return errors.New("unknown log format (must be 'plain', 'text' or 'json')")
|
||||
}
|
||||
|
||||
switch cfg.Mode {
|
||||
case ModeFull, ModeValidator, ModeSeed:
|
||||
case "":
|
||||
return errors.New("no mode has been set")
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown mode: %v", cfg.Mode)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DefaultLogLevel returns a default log level of "error"
|
||||
func DefaultLogLevel() string {
|
||||
return "error"
|
||||
//-----------------------------------------------------------------------------
|
||||
// PrivValidatorConfig
|
||||
|
||||
// PrivValidatorConfig defines the configuration parameters for running a validator
|
||||
type PrivValidatorConfig struct {
|
||||
RootDir string `mapstructure:"home"`
|
||||
|
||||
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
Key string `mapstructure:"key-file"`
|
||||
|
||||
// Path to the JSON file containing the last sign state of a validator
|
||||
State string `mapstructure:"state-file"`
|
||||
|
||||
// TCP or UNIX socket address for Tendermint to listen on for
|
||||
// connections from an external PrivValidator process
|
||||
ListenAddr string `mapstructure:"laddr"`
|
||||
|
||||
// Client certificate generated while creating needed files for secure connection.
|
||||
// If a remote validator address is provided but no certificate, the connection will be insecure
|
||||
ClientCertificate string `mapstructure:"client-certificate-file"`
|
||||
|
||||
// Client key generated while creating certificates for secure connection
|
||||
ClientKey string `mapstructure:"client-key-file"`
|
||||
|
||||
// Path Root Certificate Authority used to sign both client and server certificates
|
||||
RootCA string `mapstructure:"root-ca-file"`
|
||||
}
|
||||
|
||||
// DefaultPackageLogLevels returns a default log level setting so all packages
|
||||
// log at "error", while the `state` and `main` packages log at "info"
|
||||
func DefaultPackageLogLevels() string {
|
||||
return fmt.Sprintf("main:info,state:info,statesync:info,*:%s", DefaultLogLevel())
|
||||
// DefaultPrivValidatorConfig returns a default private validator configuration
|
||||
// for a Tendermint node.
|
||||
func DefaultPrivValidatorConfig() *PrivValidatorConfig {
|
||||
return &PrivValidatorConfig{
|
||||
Key: defaultPrivValKeyPath,
|
||||
State: defaultPrivValStatePath,
|
||||
}
|
||||
}
|
||||
|
||||
// ClientKeyFile returns the full path to the validator client key file
|
||||
func (cfg *PrivValidatorConfig) ClientKeyFile() string {
|
||||
return rootify(cfg.ClientKey, cfg.RootDir)
|
||||
}
|
||||
|
||||
// ClientCertificateFile returns the full path to the validator client certificate file
|
||||
func (cfg *PrivValidatorConfig) ClientCertificateFile() string {
|
||||
return rootify(cfg.ClientCertificate, cfg.RootDir)
|
||||
}
|
||||
|
||||
// RootCAFile returns the full path to the root certificate authority file
|
||||
func (cfg *PrivValidatorConfig) RootCAFile() string {
|
||||
return rootify(cfg.RootCA, cfg.RootDir)
|
||||
}
|
||||
|
||||
// KeyFile returns the full path to the priv_validator_key.json file
|
||||
func (cfg *PrivValidatorConfig) KeyFile() string {
|
||||
return rootify(cfg.Key, cfg.RootDir)
|
||||
}
|
||||
|
||||
// StateFile returns the full path to the priv_validator_state.json file
|
||||
func (cfg *PrivValidatorConfig) StateFile() string {
|
||||
return rootify(cfg.State, cfg.RootDir)
|
||||
}
|
||||
|
||||
func (cfg *PrivValidatorConfig) AreSecurityOptionsPresent() bool {
|
||||
switch {
|
||||
case cfg.RootCA == "":
|
||||
return false
|
||||
case cfg.ClientKey == "":
|
||||
return false
|
||||
case cfg.ClientCertificate == "":
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
@@ -357,6 +447,7 @@ type RPCConfig struct {
|
||||
|
||||
// TCP or UNIX socket address for the gRPC server to listen on
|
||||
// NOTE: This server only supports /broadcast_tx_commit
|
||||
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
|
||||
GRPCListenAddress string `mapstructure:"grpc-laddr"`
|
||||
|
||||
// Maximum number of simultaneous connections.
|
||||
@@ -364,6 +455,7 @@ type RPCConfig struct {
|
||||
// If you want to accept a larger number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
|
||||
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
|
||||
|
||||
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
|
||||
@@ -401,7 +493,7 @@ type RPCConfig struct {
|
||||
MaxHeaderBytes int `mapstructure:"max-header-bytes"`
|
||||
|
||||
// The path to a file containing certificate that is used to create the HTTPS server.
|
||||
// Migth be either absolute path or path related to tendermint's config directory.
|
||||
// Might be either absolute path or path related to Tendermint's config directory.
|
||||
//
|
||||
// If the certificate is signed by a certificate authority,
|
||||
// the certFile should be the concatenation of the server's certificate, any intermediates,
|
||||
@@ -412,7 +504,7 @@ type RPCConfig struct {
|
||||
TLSCertFile string `mapstructure:"tls-cert-file"`
|
||||
|
||||
// The path to a file containing matching private key that is used to create the HTTPS server.
|
||||
// Migth be either absolute path or path related to tendermint's config directory.
|
||||
// Might be either absolute path or path related to tendermint's config directory.
|
||||
//
|
||||
// NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
|
||||
// Otherwise, HTTP server is run.
|
||||
@@ -523,8 +615,16 @@ type P2PConfig struct { //nolint: maligned
|
||||
|
||||
// Comma separated list of seed nodes to connect to
|
||||
// We only use these if we can’t connect to peers in the addrbook
|
||||
// NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
|
||||
// TODO: Remove once p2p refactor is complete
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
Seeds string `mapstructure:"seeds"`
|
||||
|
||||
// Comma separated list of peers to be added to the peer store
|
||||
// on startup. Either BootstrapPeers or PersistentPeers are
|
||||
// needed for peer discovery
|
||||
BootstrapPeers string `mapstructure:"bootstrap-peers"`
|
||||
|
||||
// Comma separated list of nodes to keep persistent connections to
|
||||
PersistentPeers string `mapstructure:"persistent-peers"`
|
||||
|
||||
@@ -539,11 +639,25 @@ type P2PConfig struct { //nolint: maligned
|
||||
AddrBookStrict bool `mapstructure:"addr-book-strict"`
|
||||
|
||||
// Maximum number of inbound peers
|
||||
//
|
||||
// TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"`
|
||||
|
||||
// Maximum number of outbound peers to connect to, excluding persistent peers
|
||||
// Maximum number of outbound peers to connect to, excluding persistent peers.
|
||||
//
|
||||
// TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"`
|
||||
|
||||
// MaxConnections defines the maximum number of connected peers (inbound and
|
||||
// outbound).
|
||||
MaxConnections uint16 `mapstructure:"max-connections"`
|
||||
|
||||
// MaxIncomingConnectionAttempts rate limits the number of incoming connection
|
||||
// attempts per IP address.
|
||||
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`
|
||||
|
||||
// List of node IDs, to which a connection will be (re)established ignoring any existing limits
|
||||
UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"`
|
||||
|
||||
@@ -565,12 +679,6 @@ type P2PConfig struct { //nolint: maligned
|
||||
// Set true to enable the peer-exchange reactor
|
||||
PexReactor bool `mapstructure:"pex"`
|
||||
|
||||
// Seed mode, in which node constantly crawls the network and looks for
|
||||
// peers. If another node asks it for addresses, it responds and disconnects.
|
||||
//
|
||||
// Does not work if the peer-exchange reactor is disabled.
|
||||
SeedMode bool `mapstructure:"seed-mode"`
|
||||
|
||||
// Comma separated list of peer IDs to keep private (will not be gossiped to
|
||||
// other peers)
|
||||
PrivatePeerIDs string `mapstructure:"private-peer-ids"`
|
||||
@@ -585,20 +693,31 @@ type P2PConfig struct { //nolint: maligned
|
||||
// Testing params.
|
||||
// Force dial to fail
|
||||
TestDialFail bool `mapstructure:"test-dial-fail"`
|
||||
|
||||
// DisableLegacy is used mostly for testing to enable or disable the legacy
|
||||
// P2P stack.
|
||||
DisableLegacy bool `mapstructure:"disable-legacy"`
|
||||
|
||||
// Makes it possible to configure which queue backend the p2p
|
||||
// layer uses. Options are: "fifo", "priority" and "wdrr",
|
||||
// with the default being "fifo".
|
||||
QueueType string `mapstructure:"queue-type"`
|
||||
}
|
||||
|
||||
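Sketch of overriding the p2p queue backend introduced above (hypothetical override; the defaults below set "priority"):

p2pCfg := config.DefaultP2PConfig()
p2pCfg.QueueType = "wdrr" // other values named by the field comment: "fifo", "priority"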
// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
|
||||
func DefaultP2PConfig() *P2PConfig {
|
||||
return &P2PConfig{
|
||||
ListenAddress: "tcp://0.0.0.0:26656",
|
||||
ExternalAddress: "",
|
||||
UPNP: false,
|
||||
AddrBook: defaultAddrBookPath,
|
||||
AddrBookStrict: true,
|
||||
MaxNumInboundPeers: 40,
|
||||
MaxNumOutboundPeers: 10,
|
||||
PersistentPeersMaxDialPeriod: 0 * time.Second,
|
||||
FlushThrottleTimeout: 100 * time.Millisecond,
|
||||
ListenAddress: "tcp://0.0.0.0:26656",
|
||||
ExternalAddress: "",
|
||||
UPNP: false,
|
||||
AddrBook: defaultAddrBookPath,
|
||||
AddrBookStrict: true,
|
||||
MaxNumInboundPeers: 40,
|
||||
MaxNumOutboundPeers: 10,
|
||||
MaxConnections: 64,
|
||||
MaxIncomingConnectionAttempts: 100,
|
||||
PersistentPeersMaxDialPeriod: 0 * time.Second,
|
||||
FlushThrottleTimeout: 100 * time.Millisecond,
|
||||
// The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes.
|
||||
// The IP header and the TCP header take up 20 bytes each at least (unless
|
||||
// optional header fields are used) and thus the max for (non-Jumbo frame)
|
||||
@@ -608,11 +727,11 @@ func DefaultP2PConfig() *P2PConfig {
|
||||
SendRate: 5120000, // 5 mB/s
|
||||
RecvRate: 5120000, // 5 mB/s
|
||||
PexReactor: true,
|
||||
SeedMode: false,
|
||||
AllowDuplicateIP: false,
|
||||
HandshakeTimeout: 20 * time.Second,
|
||||
DialTimeout: 3 * time.Second,
|
||||
TestDialFail: false,
|
||||
QueueType: "priority",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -660,45 +779,69 @@ func (cfg *P2PConfig) ValidateBasic() error {
|
||||
//-----------------------------------------------------------------------------
|
||||
// MempoolConfig
|
||||
|
||||
// MempoolConfig defines the configuration options for the Tendermint mempool
|
||||
// MempoolConfig defines the configuration options for the Tendermint mempool.
|
||||
type MempoolConfig struct {
|
||||
Version string `mapstructure:"version"`
|
||||
RootDir string `mapstructure:"home"`
|
||||
Recheck bool `mapstructure:"recheck"`
|
||||
Broadcast bool `mapstructure:"broadcast"`
|
||||
WalPath string `mapstructure:"wal-dir"`
|
||||
|
||||
// Maximum number of transactions in the mempool
|
||||
Size int `mapstructure:"size"`
|
||||
|
||||
// Limit the total size of all txs in the mempool.
|
||||
// This only accounts for raw transactions (e.g. given 1MB transactions and
|
||||
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
|
||||
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
|
||||
|
||||
// Size of the cache (used to filter transactions we saw earlier) in transactions
|
||||
CacheSize int `mapstructure:"cache-size"`
|
||||
|
||||
// Do not remove invalid transactions from the cache (default: false)
|
||||
// Set to true if it's not possible for any invalid transaction to become
|
||||
// valid again in the future.
|
||||
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
|
||||
|
||||
// Maximum size of a single transaction
|
||||
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
|
||||
MaxTxBytes int `mapstructure:"max-tx-bytes"`
|
||||
|
||||
// Maximum size of a batch of transactions to send to a peer
|
||||
// Including space needed by encoding (one varint per transaction).
|
||||
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
|
||||
|
||||
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
|
||||
// can exist for in the mempool.
|
||||
//
|
||||
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
|
||||
// has existed in the mempool at least TTLNumBlocks number of blocks or if its
|
||||
// insertion time into the mempool is beyond TTLDuration.
|
||||
TTLDuration time.Duration `mapstructure:"ttl-duration"`
|
||||
|
||||
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
|
||||
// can exist for in the mempool.
|
||||
//
|
||||
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if
// its insertion time into the mempool is beyond TTLDuration.
|
||||
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
|
||||
}
|
||||
|
||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
|
||||
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
|
||||
func DefaultMempoolConfig() *MempoolConfig {
|
||||
return &MempoolConfig{
|
||||
Version: MempoolV1,
|
||||
Recheck: true,
|
||||
Broadcast: true,
|
||||
WalPath: "",
|
||||
// Each signature verification takes .5ms, Size reduced until we implement
|
||||
// ABCI Recheck
|
||||
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
|
||||
TTLDuration: 0 * time.Second,
|
||||
TTLNumBlocks: 0,
|
||||
}
|
||||
}
|
||||
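A minimal sketch of using the TTL fields introduced above; it is not part of the diff and assumes the package is imported as config from github.com/tendermint/tendermint/config, with placeholder TTL values.

package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// Start from the defaults above and bound how long transactions may linger.
	mcfg := config.DefaultMempoolConfig()
	mcfg.TTLDuration = 10 * time.Minute // placeholder: evict txs older than 10 minutes
	mcfg.TTLNumBlocks = 100             // placeholder: or older than 100 blocks, whichever comes first

	if err := mcfg.ValidateBasic(); err != nil {
		fmt.Println("invalid mempool config:", err)
	}
}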
|
||||
@@ -709,16 +852,6 @@ func TestMempoolConfig() *MempoolConfig {
|
||||
return cfg
|
||||
}
|
||||
|
||||
// WalDir returns the full path to the mempool's write-ahead log
|
||||
func (cfg *MempoolConfig) WalDir() string {
|
||||
return rootify(cfg.WalPath, cfg.RootDir)
|
||||
}
|
||||
|
||||
// WalEnabled returns true if the WAL is enabled.
|
||||
func (cfg *MempoolConfig) WalEnabled() bool {
|
||||
return cfg.WalPath != ""
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation (checking param bounds, etc.) and
|
||||
// returns an error if any check fails.
|
||||
func (cfg *MempoolConfig) ValidateBasic() error {
|
||||
@@ -734,6 +867,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {
|
||||
if cfg.MaxTxBytes < 0 {
|
||||
return errors.New("max-tx-bytes can't be negative")
|
||||
}
|
||||
if cfg.TTLDuration < 0 {
|
||||
return errors.New("ttl-duration can't be negative")
|
||||
}
|
||||
if cfg.TTLNumBlocks < 0 {
|
||||
return errors.New("ttl-num-blocks can't be negative")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -742,13 +882,15 @@ func (cfg *MempoolConfig) ValidateBasic() error {
|
||||
|
||||
// StateSyncConfig defines the configuration for the Tendermint state sync service
|
||||
type StateSyncConfig struct {
|
||||
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp-dir"`
RPCServers []string `mapstructure:"rpc-servers"`
TrustPeriod time.Duration `mapstructure:"trust-period"`
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
|
||||
ChunkRequestTimeout time.Duration `mapstructure:"chunk-request-timeout"`
|
||||
Fetchers int32 `mapstructure:"fetchers"`
|
||||
}
|
||||
|
||||
func (cfg *StateSyncConfig) TrustHashBytes() []byte {
|
||||
@@ -763,12 +905,14 @@ func (cfg *StateSyncConfig) TrustHashBytes() []byte {
|
||||
// DefaultStateSyncConfig returns a default configuration for the state sync service
|
||||
func DefaultStateSyncConfig() *StateSyncConfig {
|
||||
return &StateSyncConfig{
|
||||
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
|
||||
ChunkRequestTimeout: 15 * time.Second,
|
||||
Fetchers: 4,
|
||||
}
|
||||
}
|
||||
|
||||
// TestFastSyncConfig returns a default configuration for the state sync service
|
||||
// TestStateSyncConfig returns a default configuration for the state sync service
|
||||
func TestStateSyncConfig() *StateSyncConfig {
|
||||
return DefaultStateSyncConfig()
|
||||
}
|
||||
@@ -779,60 +923,78 @@ func (cfg *StateSyncConfig) ValidateBasic() error {
|
||||
if len(cfg.RPCServers) == 0 {
|
||||
return errors.New("rpc-servers is required")
|
||||
}
|
||||
|
||||
if len(cfg.RPCServers) < 2 {
|
||||
return errors.New("at least two rpc-servers entries is required")
|
||||
}
|
||||
|
||||
for _, server := range cfg.RPCServers {
|
||||
if len(server) == 0 {
|
||||
return errors.New("found empty rpc-servers entry")
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second {
|
||||
return errors.New("discovery time must be 0s or greater than five seconds")
|
||||
}
|
||||
|
||||
if cfg.TrustPeriod <= 0 {
|
||||
return errors.New("trusted-period is required")
|
||||
}
|
||||
|
||||
if cfg.TrustHeight <= 0 {
|
||||
return errors.New("trusted-height is required")
|
||||
}
|
||||
|
||||
if len(cfg.TrustHash) == 0 {
|
||||
return errors.New("trusted-hash is required")
|
||||
}
|
||||
|
||||
_, err := hex.DecodeString(cfg.TrustHash)
|
||||
if err != nil {
|
||||
return fmt.Errorf("invalid trusted-hash: %w", err)
|
||||
}
|
||||
|
||||
if cfg.ChunkRequestTimeout < 5*time.Second {
|
||||
return errors.New("chunk-request-timeout must be at least 5 seconds")
|
||||
}
|
||||
|
||||
if cfg.Fetchers <= 0 {
|
||||
return errors.New("fetchers is required")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
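A minimal sketch of a StateSyncConfig that passes the ValidateBasic checks above; it is not part of the diff. It assumes the package is imported as config from github.com/tendermint/tendermint/config, and the RPC endpoints, trust height, and trust hash are placeholders.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// ValidateBasic above requires at least two RPC servers plus a trusted
	// height and hex hash inside the trust period; these values are placeholders.
	ss := config.DefaultStateSyncConfig()
	ss.Enable = true
	ss.RPCServers = []string{"http://rpc-1:26657", "http://rpc-2:26657"} // hypothetical endpoints
	ss.TrustHeight = 1000
	ss.TrustHash = "F0E1D2C3B4A5968778695A4B3C2D1E0FF0E1D2C3B4A5968778695A4B3C2D1E0F" // placeholder hex

	if err := ss.ValidateBasic(); err != nil {
		fmt.Println("invalid state sync config:", err)
	}
}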
|
||||
//-----------------------------------------------------------------------------
|
||||
// FastSyncConfig
|
||||
|
||||
// FastSyncConfig defines the configuration for the Tendermint fast sync service
|
||||
type FastSyncConfig struct {
|
||||
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
|
||||
type BlockSyncConfig struct {
|
||||
Version string `mapstructure:"version"`
|
||||
}
|
||||
|
||||
// DefaultFastSyncConfig returns a default configuration for the fast sync service
|
||||
func DefaultFastSyncConfig() *FastSyncConfig {
|
||||
return &FastSyncConfig{
|
||||
Version: "v0",
|
||||
// DefaultBlockSyncConfig returns a default configuration for the block sync service
|
||||
func DefaultBlockSyncConfig() *BlockSyncConfig {
|
||||
return &BlockSyncConfig{
|
||||
Version: BlockSyncV0,
|
||||
}
|
||||
}
|
||||
|
||||
// TestFastSyncConfig returns a default configuration for the fast sync.
|
||||
func TestFastSyncConfig() *FastSyncConfig {
|
||||
return DefaultFastSyncConfig()
|
||||
// TestBlockSyncConfig returns a default configuration for the block sync.
|
||||
func TestBlockSyncConfig() *BlockSyncConfig {
|
||||
return DefaultBlockSyncConfig()
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation.
|
||||
func (cfg *FastSyncConfig) ValidateBasic() error {
|
||||
func (cfg *BlockSyncConfig) ValidateBasic() error {
|
||||
switch cfg.Version {
|
||||
case "v0":
|
||||
return nil
|
||||
case "v2":
|
||||
case BlockSyncV0:
|
||||
return nil
|
||||
case BlockSyncV2:
|
||||
return errors.New("blocksync version v2 is no longer supported. Please use v0")
|
||||
default:
|
||||
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
|
||||
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
|
||||
}
|
||||
}
|
||||
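A minimal sketch of the version check above; it is not part of the diff and assumes the package is imported as config from github.com/tendermint/tendermint/config. Per the test further down, "v2" now fails validation while the v0 default passes.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// BlockSyncV0 is the only accepted version; v2 is rejected with an error.
	bs := config.DefaultBlockSyncConfig()
	fmt.Println(bs.ValidateBasic()) // <nil>

	bs.Version = "v2"
	fmt.Println(bs.ValidateBasic()) // error: blocksync version v2 is no longer supported
}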
|
||||
@@ -846,6 +1008,7 @@ type ConsensusConfig struct {
|
||||
WalPath string `mapstructure:"wal-file"`
|
||||
walFile string // overrides WalPath if set
|
||||
|
||||
// TODO: remove timeout configs, these should be global not local
|
||||
// How long we wait for a proposal block before prevoting nil
|
||||
TimeoutPropose time.Duration `mapstructure:"timeout-propose"`
|
||||
// How much timeout-propose increases with each round
|
||||
@@ -1010,19 +1173,25 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
|
||||
// TxIndexConfig defines the configuration for the transaction indexer,
|
||||
// including composite keys to index.
|
||||
type TxIndexConfig struct {
|
||||
// What indexer to use for transactions
|
||||
// The backend database list to back the indexer.
|
||||
// If the list contains `null`, no indexer service will be used.
|
||||
//
|
||||
// Options:
|
||||
// 1) "null"
|
||||
// 1) "null" - no indexer services.
|
||||
// 2) "kv" (default) - the simplest possible indexer,
|
||||
// backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
Indexer string `mapstructure:"indexer"`
|
||||
// 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
Indexer []string `mapstructure:"indexer"`
|
||||
|
||||
// The PostgreSQL connection configuration, the connection format:
|
||||
// postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
|
||||
PsqlConn string `mapstructure:"psql-conn"`
|
||||
}
|
||||
|
||||
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
|
||||
func DefaultTxIndexConfig() *TxIndexConfig {
|
||||
return &TxIndexConfig{
|
||||
Indexer: "kv",
|
||||
Indexer: []string{"kv"},
|
||||
}
|
||||
}
|
||||
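A minimal sketch of the list-valued indexer setting introduced above; it is not part of the diff. It assumes the package is imported as config from github.com/tendermint/tendermint/config, and the PostgreSQL connection string is a placeholder in the format documented above.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// Indexer is now a list, so the kv indexer can be combined with the psql sink.
	txidx := config.DefaultTxIndexConfig()
	txidx.Indexer = []string{"kv", "psql"}
	txidx.PsqlConn = "postgresql://user:password@localhost:5432/tendermint" // placeholder DSN

	fmt.Println(txidx.Indexer, txidx.PsqlConn)
}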
|
||||
|
||||
@@ -22,12 +22,9 @@ func TestDefaultConfig(t *testing.T) {
|
||||
cfg.SetRoot("/foo")
|
||||
cfg.Genesis = "bar"
|
||||
cfg.DBPath = "/opt/data"
|
||||
cfg.Mempool.WalPath = "wal/mem/"
|
||||
|
||||
assert.Equal("/foo/bar", cfg.GenesisFile())
|
||||
assert.Equal("/opt/data", cfg.DBDir())
|
||||
assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())
|
||||
|
||||
}
|
||||
|
||||
func TestConfigValidateBasic(t *testing.T) {
|
||||
@@ -128,13 +125,13 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
|
||||
require.NoError(t, cfg.ValidateBasic())
|
||||
}
|
||||
|
||||
func TestFastSyncConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestFastSyncConfig()
|
||||
func TestBlockSyncConfigValidateBasic(t *testing.T) {
|
||||
cfg := TestBlockSyncConfig()
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
|
||||
// tamper with version
|
||||
cfg.Version = "v2"
|
||||
assert.NoError(t, cfg.ValidateBasic())
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
|
||||
cfg.Version = "invalid"
|
||||
assert.Error(t, cfg.ValidateBasic())
|
||||
|
||||
26 config/db.go (new file)
@@ -0,0 +1,26 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
db "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
// ServiceProvider takes a config and a logger and returns a ready to go Node.
|
||||
type ServiceProvider func(*Config, log.Logger) (service.Service, error)
|
||||
|
||||
// DBContext specifies config information for loading a new DB.
|
||||
type DBContext struct {
|
||||
ID string
|
||||
Config *Config
|
||||
}
|
||||
|
||||
// DBProvider takes a DBContext and returns an instantiated DB.
|
||||
type DBProvider func(*DBContext) (db.DB, error)
|
||||
|
||||
// DefaultDBProvider returns a database using the DBBackend and DBDir
|
||||
// specified in the Config.
|
||||
func DefaultDBProvider(ctx *DBContext) (db.DB, error) {
|
||||
dbType := db.BackendType(ctx.Config.DBBackend)
|
||||
return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
|
||||
}
|
||||
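A minimal sketch of opening a database through the provider defined in config/db.go above; it is not part of the diff. It assumes the package is imported as config from github.com/tendermint/tendermint/config, and the home directory and database ID are placeholders.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// DefaultDBProvider resolves the backend and directory from the Config,
	// so callers only choose an ID for the database.
	cfg := config.DefaultConfig()
	cfg.SetRoot("/tmp/tmhome") // hypothetical home directory

	blockstoreDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg})
	if err != nil {
		fmt.Println("open db:", err)
		return
	}
	defer blockstoreDB.Close()
}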
192 config/toml.go
@@ -41,32 +41,29 @@ func EnsureRoot(rootDir string) {
|
||||
if err := tmos.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
|
||||
// Write default config file if missing.
|
||||
if !tmos.FileExists(configFilePath) {
|
||||
writeDefaultConfigFile(configFilePath)
|
||||
}
|
||||
}
|
||||
|
||||
// XXX: this func should probably be called by cmd/tendermint/commands/init.go
|
||||
// alongside the writing of the genesis.json and priv_validator.json
|
||||
func writeDefaultConfigFile(configFilePath string) {
|
||||
WriteConfigFile(configFilePath, DefaultConfig())
|
||||
}
|
||||
|
||||
// WriteConfigFile renders config using the template and writes it to configFilePath.
|
||||
func WriteConfigFile(configFilePath string, config *Config) {
|
||||
// This function is called by cmd/tendermint/commands/init.go
|
||||
func WriteConfigFile(rootDir string, config *Config) {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
if err := configTemplate.Execute(&buffer, config); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
|
||||
mustWriteFile(configFilePath, buffer.Bytes(), 0644)
|
||||
}
|
||||
|
||||
func writeDefaultConfigFileIfNone(rootDir string) {
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
if !tmos.FileExists(configFilePath) {
|
||||
WriteConfigFile(rootDir, DefaultConfig())
|
||||
}
|
||||
}
|
||||
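A minimal sketch of the reworked config-writing flow above; it is not part of the diff. It assumes the package is imported as config from github.com/tendermint/tendermint/config, and the root directory is a placeholder.

package main

import (
	"path/filepath"

	"github.com/tendermint/tendermint/config"
)

func main() {
	// WriteConfigFile now takes the root directory rather than the file path,
	// and joins defaultConfigFilePath itself (see the function above).
	rootDir := filepath.Join("/tmp", "tmhome") // hypothetical root
	config.EnsureRoot(rootDir)                 // creates the config/ and data/ directories

	cfg := config.DefaultConfig()
	cfg.SetRoot(rootDir)
	config.WriteConfigFile(rootDir, cfg) // renders config.toml under rootDir/config/
}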
|
||||
// Note: any changes to the comments/variables/mapstructure
|
||||
// must be reflected in the appropriate struct in config/config.go
|
||||
const defaultConfigTemplate = `# This is a TOML config file.
|
||||
@@ -88,6 +85,18 @@ proxy-app = "{{ .BaseConfig.ProxyApp }}"
|
||||
# A custom human readable name for this node
|
||||
moniker = "{{ .BaseConfig.Moniker }}"
|
||||
|
||||
# Mode of Node: full | validator | seed
|
||||
# * validator node
|
||||
# - all reactors
|
||||
# - with priv_validator_key.json, priv_validator_state.json
|
||||
# * full node
|
||||
# - all reactors
|
||||
# - No priv_validator_key.json, priv_validator_state.json
|
||||
# * seed node
|
||||
# - only P2P, PEX Reactor
|
||||
# - No priv_validator_key.json, priv_validator_state.json
|
||||
mode = "{{ .BaseConfig.Mode }}"
|
||||
|
||||
# If this node is many blocks behind the tip of the chain, FastSync
|
||||
# allows them to catchup quickly by downloading blocks in parallel
|
||||
# and verifying their commits
|
||||
@@ -128,27 +137,6 @@ log-format = "{{ .BaseConfig.LogFormat }}"
|
||||
# Path to the JSON file containing the initial validator set and other meta data
|
||||
genesis-file = "{{ js .BaseConfig.Genesis }}"
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
priv-validator-key-file = "{{ js .BaseConfig.PrivValidatorKey }}"
|
||||
|
||||
# Path to the JSON file containing the last sign state of a validator
|
||||
priv-validator-state-file = "{{ js .BaseConfig.PrivValidatorState }}"
|
||||
|
||||
# TCP or UNIX socket address for Tendermint to listen on for
|
||||
# connections from an external PrivValidator process
|
||||
# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client
|
||||
priv-validator-laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
|
||||
|
||||
# Client certificate generated while creating needed files for secure connection.
|
||||
# If a remote validator address is provided but no certificate, the connection will be insecure
|
||||
priv-validator-client-certificate-file = "{{ js .BaseConfig.PrivValidatorClientCertificate }}"
|
||||
|
||||
# Client key generated while creating certificates for secure connection
|
||||
priv-validator-client-key-file = "{{ js .BaseConfig.PrivValidatorClientKey }}"
|
||||
|
||||
# Path Root Certificate Authority used to sign both client and server certificates
|
||||
priv-validator-certificate-authority = "{{ js .BaseConfig.PrivValidatorRootCA }}"
|
||||
|
||||
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
|
||||
node-key-file = "{{ js .BaseConfig.NodeKey }}"
|
||||
|
||||
@@ -160,6 +148,33 @@ abci = "{{ .BaseConfig.ABCI }}"
|
||||
filter-peers = {{ .BaseConfig.FilterPeers }}
|
||||
|
||||
|
||||
#######################################################
|
||||
### Priv Validator Configuration ###
|
||||
#######################################################
|
||||
[priv-validator]
|
||||
|
||||
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
key-file = "{{ js .PrivValidator.Key }}"
|
||||
|
||||
# Path to the JSON file containing the last sign state of a validator
|
||||
state-file = "{{ js .PrivValidator.State }}"
|
||||
|
||||
# TCP or UNIX socket address for Tendermint to listen on for
|
||||
# connections from an external PrivValidator process
|
||||
# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client
|
||||
laddr = "{{ .PrivValidator.ListenAddr }}"
|
||||
|
||||
# Client certificate generated while creating needed files for secure connection.
|
||||
# If a remote validator address is provided but no certificate, the connection will be insecure
|
||||
client-certificate-file = "{{ js .PrivValidator.ClientCertificate }}"
|
||||
|
||||
# Client key generated while creating certificates for secure connection
|
||||
validator-client-key-file = "{{ js .PrivValidator.ClientKey }}"
|
||||
|
||||
# Path Root Certificate Authority used to sign both client and server certificates
|
||||
certificate-authority = "{{ js .PrivValidator.RootCA }}"
|
||||
|
||||
|
||||
#######################################################################
|
||||
### Advanced Configuration Options ###
|
||||
#######################################################################
|
||||
@@ -185,6 +200,7 @@ cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}
|
||||
|
||||
# TCP or UNIX socket address for the gRPC server to listen on
|
||||
# NOTE: This server only supports /broadcast_tx_commit
|
||||
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
|
||||
|
||||
# Maximum number of simultaneous connections.
|
||||
@@ -194,6 +210,7 @@ grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
|
||||
# 0 - unlimited.
|
||||
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
# 1024 - 40 - 10 - 50 = 924 = ~900
|
||||
# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36.
|
||||
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
|
||||
|
||||
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
|
||||
@@ -231,7 +248,7 @@ max-body-bytes = {{ .RPC.MaxBodyBytes }}
|
||||
max-header-bytes = {{ .RPC.MaxHeaderBytes }}
|
||||
|
||||
# The path to a file containing certificate that is used to create the HTTPS server.
|
||||
# Migth be either absolute path or path related to tendermint's config directory.
|
||||
# Might be either absolute path or path related to Tendermint's config directory.
|
||||
# If the certificate is signed by a certificate authority,
|
||||
# the certFile should be the concatenation of the server's certificate, any intermediates,
|
||||
# and the CA's certificate.
|
||||
@@ -240,7 +257,7 @@ max-header-bytes = {{ .RPC.MaxHeaderBytes }}
|
||||
tls-cert-file = "{{ .RPC.TLSCertFile }}"
|
||||
|
||||
# The path to a file containing matching private key that is used to create the HTTPS server.
|
||||
# Migth be either absolute path or path related to tendermint's config directory.
|
||||
# Might be either absolute path or path related to Tendermint's config directory.
|
||||
# NOTE: both tls-cert-file and tls-key-file must be present for Tendermint to create HTTPS server.
|
||||
# Otherwise, HTTP server is run.
|
||||
tls-key-file = "{{ .RPC.TLSKeyFile }}"
|
||||
@@ -253,18 +270,34 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
|
||||
#######################################################
|
||||
[p2p]
|
||||
|
||||
# Enable the new p2p layer.
|
||||
disable-legacy = {{ .P2P.DisableLegacy }}
|
||||
|
||||
# Select the p2p internal queue
|
||||
queue-type = "{{ .P2P.QueueType }}"
|
||||
|
||||
# Address to listen for incoming connections
|
||||
laddr = "{{ .P2P.ListenAddress }}"
|
||||
|
||||
# Address to advertise to peers for them to dial
|
||||
# If empty, will use the same port as the laddr,
|
||||
# and will introspect on the listener or use UPnP
|
||||
# to figure out the address.
|
||||
# to figure out the address. ip and port are required
|
||||
# example: 159.89.10.97:26656
|
||||
external-address = "{{ .P2P.ExternalAddress }}"
|
||||
|
||||
# Comma separated list of seed nodes to connect to
|
||||
# We only use these if we can’t connect to peers in the addrbook
|
||||
# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
|
||||
# TODO: Remove once p2p refactor is complete
|
||||
# ref: https:#github.com/tendermint/tendermint/issues/5670
|
||||
seeds = "{{ .P2P.Seeds }}"
|
||||
|
||||
# Comma separated list of peers to be added to the peer store
|
||||
# on startup. Either BootstrapPeers or PersistentPeers are
|
||||
# needed for peer discovery
|
||||
bootstrap-peers = "{{ .P2P.BootstrapPeers }}"
|
||||
|
||||
# Comma separated list of nodes to keep persistent connections to
|
||||
persistent-peers = "{{ .P2P.PersistentPeers }}"
|
||||
|
||||
@@ -279,11 +312,23 @@ addr-book-file = "{{ js .P2P.AddrBook }}"
|
||||
addr-book-strict = {{ .P2P.AddrBookStrict }}
|
||||
|
||||
# Maximum number of inbound peers
|
||||
#
|
||||
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }}
|
||||
|
||||
# Maximum number of outbound peers to connect to, excluding persistent peers
|
||||
#
|
||||
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
|
||||
# ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}
|
||||
|
||||
# Maximum number of connections (inbound and outbound).
|
||||
max-connections = {{ .P2P.MaxConnections }}
|
||||
|
||||
# Rate limits the number of incoming connection attempts per IP address.
|
||||
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}
|
||||
|
||||
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
|
||||
unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}"
|
||||
|
||||
@@ -305,13 +350,8 @@ recv-rate = {{ .P2P.RecvRate }}
|
||||
# Set true to enable the peer-exchange reactor
|
||||
pex = {{ .P2P.PexReactor }}
|
||||
|
||||
# Seed mode, in which node constantly crawls the network and looks for
|
||||
# peers. If another node asks it for addresses, it responds and disconnects.
|
||||
#
|
||||
# Does not work if the peer-exchange reactor is disabled.
|
||||
seed-mode = {{ .P2P.SeedMode }}
|
||||
|
||||
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
|
||||
# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055
|
||||
private-peer-ids = "{{ .P2P.PrivatePeerIDs }}"
|
||||
|
||||
# Toggle to disable guard against peers connecting from the same ip.
|
||||
@@ -322,13 +362,17 @@ handshake-timeout = "{{ .P2P.HandshakeTimeout }}"
|
||||
dial-timeout = "{{ .P2P.DialTimeout }}"
|
||||
|
||||
#######################################################
|
||||
### Mempool Configurattion Option ###
|
||||
### Mempool Configuration Option ###
|
||||
#######################################################
|
||||
[mempool]
|
||||
|
||||
# Mempool version to use:
|
||||
# 1) "v0" - The legacy non-prioritized mempool reactor.
|
||||
# 2) "v1" (default) - The prioritized mempool reactor.
|
||||
version = "{{ .Mempool.Version }}"
|
||||
|
||||
recheck = {{ .Mempool.Recheck }}
|
||||
broadcast = {{ .Mempool.Broadcast }}
|
||||
wal-dir = "{{ js .Mempool.WalPath }}"
|
||||
|
||||
# Maximum number of transactions in the mempool
|
||||
size = {{ .Mempool.Size }}
|
||||
@@ -355,6 +399,22 @@ max-tx-bytes = {{ .Mempool.MaxTxBytes }}
|
||||
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
|
||||
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}
|
||||
|
||||
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
|
||||
# can exist for in the mempool.
|
||||
#
|
||||
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
# insertion time into the mempool is beyond ttl-duration.
|
||||
ttl-duration = "{{ .Mempool.TTLDuration }}"
|
||||
|
||||
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
|
||||
# can exist for in the mempool.
|
||||
#
|
||||
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# its insertion time into the mempool is beyond ttl-duration.
|
||||
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
|
||||
|
||||
#######################################################
|
||||
### State Sync Configuration Options ###
|
||||
#######################################################
|
||||
@@ -384,15 +444,22 @@ discovery-time = "{{ .StateSync.DiscoveryTime }}"
|
||||
# Will create a new, randomly named directory within, and remove it when done.
|
||||
temp-dir = "{{ .StateSync.TempDir }}"
|
||||
|
||||
# The timeout duration before re-requesting a chunk, possibly from a different
|
||||
# peer (default: 15 seconds).
|
||||
chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
|
||||
|
||||
# The number of concurrent chunk and block fetchers to run (default: 4).
|
||||
fetchers = "{{ .StateSync.Fetchers }}"
|
||||
|
||||
#######################################################
|
||||
### Fast Sync Configuration Connections ###
|
||||
### Block Sync Configuration Connections ###
|
||||
#######################################################
|
||||
[fastsync]
|
||||
|
||||
# Fast Sync version to use:
|
||||
# 1) "v0" (default) - the legacy fast sync implementation
|
||||
# 2) "v2" - complete redesign of v0, optimized for testability & readability
|
||||
version = "{{ .FastSync.Version }}"
|
||||
# Block Sync version to use:
|
||||
# 1) "v0" (default) - the legacy block sync implementation
|
||||
# 2) "v2" - DEPRECATED, please use v0
|
||||
version = "{{ .BlockSync.Version }}"
|
||||
|
||||
#######################################################
|
||||
### Consensus Configuration Options ###
|
||||
@@ -440,7 +507,8 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
#######################################################
|
||||
[tx-index]
|
||||
|
||||
# What indexer to use for transactions
|
||||
# The backend database list to back the indexer.
|
||||
# If the list contains null, no indexer service will be used.
|
||||
#
|
||||
# The application will set which txs to index. In some cases a node operator will be able
|
||||
# to decide which txs to index based on configuration set in the application.
|
||||
@@ -449,7 +517,12 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
|
||||
# 1) "null"
|
||||
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
|
||||
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
|
||||
indexer = "{{ .TxIndex.Indexer }}"
|
||||
# 3) "psql" - the indexer services backed by PostgreSQL.
|
||||
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}]
|
||||
|
||||
# The PostgreSQL connection configuration, the connection format:
|
||||
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
|
||||
psql-conn = "{{ .TxIndex.PsqlConn }}"
|
||||
|
||||
#######################################################
|
||||
### Instrumentation Configuration Options ###
|
||||
@@ -494,16 +567,13 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
baseConfig := DefaultBaseConfig()
|
||||
configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
|
||||
genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
|
||||
privKeyFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorKey)
|
||||
privStateFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorState)
|
||||
conf := DefaultConfig()
|
||||
genesisFilePath := filepath.Join(rootDir, conf.Genesis)
|
||||
privKeyFilePath := filepath.Join(rootDir, conf.PrivValidator.Key)
|
||||
privStateFilePath := filepath.Join(rootDir, conf.PrivValidator.State)
|
||||
|
||||
// Write default config file if missing.
|
||||
if !tmos.FileExists(configFilePath) {
|
||||
writeDefaultConfigFile(configFilePath)
|
||||
}
|
||||
writeDefaultConfigFileIfNone(rootDir)
|
||||
if !tmos.FileExists(genesisFilePath) {
|
||||
if chainID == "" {
|
||||
chainID = "tendermint_test"
|
||||
|
||||
@@ -30,6 +30,8 @@ func TestEnsureRoot(t *testing.T) {
|
||||
// create root dir
|
||||
EnsureRoot(tmpDir)
|
||||
|
||||
WriteConfigFile(tmpDir, DefaultConfig())
|
||||
|
||||
// make sure config is set properly
|
||||
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
|
||||
require.Nil(err)
|
||||
@@ -61,7 +63,8 @@ func TestEnsureTestRoot(t *testing.T) {
|
||||
|
||||
// TODO: make sure the cfg returned and testconfig are the same!
|
||||
baseConfig := DefaultBaseConfig()
|
||||
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState)
|
||||
pvConfig := DefaultPrivValidatorConfig()
|
||||
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, pvConfig.Key, pvConfig.State)
|
||||
}
|
||||
|
||||
func checkConfig(configFile string) bool {
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
# Consensus
|
||||
|
||||
See the [consensus spec](https://github.com/tendermint/spec/tree/master/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/spec/tree/master/spec/reactors/consensus) for more information.
|
||||
@@ -1,470 +0,0 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/evidence"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
//----------------------------------------------
|
||||
// byzantine failures
|
||||
|
||||
// Byzantine node sends two different prevotes (nil and blockID) to the same validator
|
||||
func TestByzantinePrevoteEquivocation(t *testing.T) {
|
||||
const nValidators = 4
|
||||
const byzantineNode = 0
|
||||
const prevoteHeight = int64(2)
|
||||
testName := "consensus_byzantine_test"
|
||||
tickerFunc := newMockTickerFunc(true)
|
||||
appFunc := newCounter
|
||||
|
||||
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
|
||||
css := make([]*State, nValidators)
|
||||
|
||||
for i := 0; i < nValidators; i++ {
|
||||
logger := consensusLogger().With("test", "byzantine", "validator", i)
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
app := appFunc()
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
// Make a full instance of the evidence pool
|
||||
evidenceDB := dbm.NewMemDB()
|
||||
evpool, err := evidence.NewPool(logger.With("module", "evidence"), evidenceDB, stateStore, blockStore)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make State
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
|
||||
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
|
||||
cs.SetLogger(cs.Logger)
|
||||
// set private validator
|
||||
pv := privVals[i]
|
||||
cs.SetPrivValidator(pv)
|
||||
|
||||
eventBus := types.NewEventBus()
|
||||
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
|
||||
err = eventBus.Start()
|
||||
require.NoError(t, err)
|
||||
cs.SetEventBus(eventBus)
|
||||
|
||||
cs.SetTimeoutTicker(tickerFunc())
|
||||
cs.SetLogger(logger)
|
||||
|
||||
css[i] = cs
|
||||
}
|
||||
|
||||
// initialize the reactors for each of the validators
|
||||
reactors := make([]*Reactor, nValidators)
|
||||
blocksSubs := make([]types.Subscription, 0)
|
||||
eventBuses := make([]*types.EventBus, nValidators)
|
||||
for i := 0; i < nValidators; i++ {
|
||||
reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
|
||||
reactors[i].SetLogger(css[i].Logger)
|
||||
|
||||
// eventBus is already started with the cs
|
||||
eventBuses[i] = css[i].eventBus
|
||||
reactors[i].SetEventBus(eventBuses[i])
|
||||
|
||||
blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, 100)
|
||||
require.NoError(t, err)
|
||||
blocksSubs = append(blocksSubs, blocksSub)
|
||||
|
||||
if css[i].state.LastBlockHeight == 0 { // simulate handle initChain in handshake
|
||||
err = css[i].blockExec.Store().Save(css[i].state)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
// make connected switches and start all reactors
|
||||
p2p.MakeConnectedSwitches(config.P2P, nValidators, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("CONSENSUS", reactors[i])
|
||||
s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
|
||||
return s
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
// create byzantine validator
|
||||
bcs := css[byzantineNode]
|
||||
|
||||
// alter prevote so that the byzantine node double votes when height is 2
|
||||
bcs.doPrevote = func(height int64, round int32) {
|
||||
// allow first height to happen normally so that byzantine validator is no longer proposer
|
||||
if height == prevoteHeight {
|
||||
bcs.Logger.Info("Sending two votes")
|
||||
prevote1, err := bcs.signVote(tmproto.PrevoteType, bcs.ProposalBlock.Hash(), bcs.ProposalBlockParts.Header())
|
||||
require.NoError(t, err)
|
||||
prevote2, err := bcs.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
|
||||
require.NoError(t, err)
|
||||
peerList := reactors[byzantineNode].Switch.Peers().List()
|
||||
bcs.Logger.Info("Getting peer list", "peers", peerList)
|
||||
// send two votes to all peers (1st to one half, 2nd to another half)
|
||||
for i, peer := range peerList {
|
||||
if i < len(peerList)/2 {
|
||||
bcs.Logger.Info("Signed and pushed vote", "vote", prevote1, "peer", peer)
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote1}))
|
||||
} else {
|
||||
bcs.Logger.Info("Signed and pushed vote", "vote", prevote2, "peer", peer)
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote2}))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
bcs.Logger.Info("Behaving normally")
|
||||
bcs.defaultDoPrevote(height, round)
|
||||
}
|
||||
}
|
||||
|
||||
// start the consensus reactors
|
||||
for i := 0; i < nValidators; i++ {
|
||||
s := reactors[i].conS.GetState()
|
||||
reactors[i].SwitchToConsensus(s, false)
|
||||
}
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
// Evidence should be submitted and committed at the third height but
|
||||
// we will check the first six just in case
|
||||
evidenceFromEachValidator := make([]types.Evidence, nValidators)
|
||||
|
||||
wg := new(sync.WaitGroup)
|
||||
for i := 0; i < nValidators; i++ {
|
||||
wg.Add(1)
|
||||
go func(i int) {
|
||||
defer wg.Done()
|
||||
for msg := range blocksSubs[i].Out() {
|
||||
block := msg.Data().(types.EventDataNewBlock).Block
|
||||
if len(block.Evidence.Evidence) != 0 {
|
||||
evidenceFromEachValidator[i] = block.Evidence.Evidence[0]
|
||||
return
|
||||
}
|
||||
}
|
||||
}(i)
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
pubkey, err := bcs.privValidator.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
for idx, ev := range evidenceFromEachValidator {
|
||||
if assert.NotNil(t, ev, idx) {
|
||||
ev, ok := ev.(*types.DuplicateVoteEvidence)
|
||||
assert.True(t, ok)
|
||||
assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
|
||||
assert.Equal(t, prevoteHeight, ev.Height())
|
||||
}
|
||||
}
|
||||
case <-time.After(20 * time.Second):
|
||||
for i, reactor := range reactors {
|
||||
t.Logf("Consensus Reactor %d\n%v", i, reactor)
|
||||
}
|
||||
t.Fatalf("Timed out waiting for validators to commit evidence")
|
||||
}
|
||||
}
|
||||
|
||||
// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals).
|
||||
// byzantine validator sends conflicting proposals into A and B,
|
||||
// and prevotes/precommits on both of them.
|
||||
// B sees a commit, A doesn't.
|
||||
// Heal partition and ensure A sees the commit
|
||||
func TestByzantineConflictingProposalsWithPartition(t *testing.T) {
|
||||
N := 4
|
||||
logger := consensusLogger().With("test", "byzantine")
|
||||
app := newCounter
|
||||
css, cleanup := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), app)
|
||||
defer cleanup()
|
||||
|
||||
// give the byzantine validator a normal ticker
|
||||
ticker := NewTimeoutTicker()
|
||||
ticker.SetLogger(css[0].Logger)
|
||||
css[0].SetTimeoutTicker(ticker)
|
||||
|
||||
p2pLogger := logger.With("module", "p2p")
|
||||
|
||||
blocksSubs := make([]types.Subscription, N)
|
||||
reactors := make([]p2p.Reactor, N)
|
||||
for i := 0; i < N; i++ {
|
||||
|
||||
// enable txs so we can create different proposals
|
||||
assertMempool(css[i].txNotifier).EnableTxsAvailable()
|
||||
|
||||
eventBus := css[i].eventBus
|
||||
eventBus.SetLogger(logger.With("module", "events", "validator", i))
|
||||
|
||||
var err error
|
||||
blocksSubs[i], err = eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
conR := NewReactor(css[i], true) // so we don't start the consensus states
|
||||
conR.SetLogger(logger.With("validator", i))
|
||||
conR.SetEventBus(eventBus)
|
||||
|
||||
var conRI p2p.Reactor = conR
|
||||
|
||||
// make first val byzantine
|
||||
if i == 0 {
|
||||
conRI = NewByzantineReactor(conR)
|
||||
}
|
||||
|
||||
reactors[i] = conRI
|
||||
err = css[i].blockExec.Store().Save(css[i].state) // for save height 1's validators info
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
switches := p2p.MakeConnectedSwitches(config.P2P, N, func(i int, sw *p2p.Switch) *p2p.Switch {
|
||||
sw.SetLogger(p2pLogger.With("validator", i))
|
||||
sw.AddReactor("CONSENSUS", reactors[i])
|
||||
return sw
|
||||
}, func(sws []*p2p.Switch, i, j int) {
|
||||
// the network starts partitioned with globally active adversary
|
||||
if i != 0 {
|
||||
return
|
||||
}
|
||||
p2p.Connect2Switches(sws, i, j)
|
||||
})
|
||||
|
||||
// make first val byzantine
|
||||
// NOTE: Now, test validators are MockPV, which by default doesn't
|
||||
// do any safety checks.
|
||||
css[0].privValidator.(types.MockPV).DisableChecks()
|
||||
css[0].decideProposal = func(j int32) func(int64, int32) {
|
||||
return func(height int64, round int32) {
|
||||
byzantineDecideProposalFunc(t, height, round, css[j], switches[j])
|
||||
}
|
||||
}(int32(0))
|
||||
// We are setting the prevote function to do nothing because the prevoting
|
||||
// and precommitting are done alongside the proposal.
|
||||
css[0].doPrevote = func(height int64, round int32) {}
|
||||
|
||||
defer func() {
|
||||
for _, sw := range switches {
|
||||
err := sw.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
// start the non-byz state machines.
|
||||
// note these must be started before the byz
|
||||
for i := 1; i < N; i++ {
|
||||
cr := reactors[i].(*Reactor)
|
||||
cr.SwitchToConsensus(cr.conS.GetState(), false)
|
||||
}
|
||||
|
||||
// start the byzantine state machine
|
||||
byzR := reactors[0].(*ByzantineReactor)
|
||||
s := byzR.reactor.conS.GetState()
|
||||
byzR.reactor.SwitchToConsensus(s, false)
|
||||
|
||||
// byz proposer sends one block to peers[0]
|
||||
// and the other block to peers[1] and peers[2].
|
||||
// note peers and switches order don't match.
|
||||
peers := switches[0].Peers().List()
|
||||
|
||||
// partition A
|
||||
ind0 := getSwitchIndex(switches, peers[0])
|
||||
|
||||
// partition B
|
||||
ind1 := getSwitchIndex(switches, peers[1])
|
||||
ind2 := getSwitchIndex(switches, peers[2])
|
||||
p2p.Connect2Switches(switches, ind1, ind2)
|
||||
|
||||
// wait for someone in the big partition (B) to make a block
|
||||
<-blocksSubs[ind2].Out()
|
||||
|
||||
t.Log("A block has been committed. Healing partition")
|
||||
p2p.Connect2Switches(switches, ind0, ind1)
|
||||
p2p.Connect2Switches(switches, ind0, ind2)
|
||||
|
||||
// wait till everyone makes the first new block
|
||||
// (one of them already has)
|
||||
wg := new(sync.WaitGroup)
|
||||
for i := 1; i < N-1; i++ {
|
||||
wg.Add(1)
|
||||
go func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
tick := time.NewTicker(time.Second * 10)
|
||||
select {
|
||||
case <-done:
|
||||
case <-tick.C:
|
||||
for i, reactor := range reactors {
|
||||
t.Log(fmt.Sprintf("Consensus Reactor %v", i))
|
||||
t.Log(fmt.Sprintf("%v", reactor))
|
||||
}
|
||||
t.Fatalf("Timed out waiting for all validators to commit first block")
|
||||
}
|
||||
}
|
||||
|
||||
//-------------------------------
|
||||
// byzantine consensus functions
|
||||
|
||||
func byzantineDecideProposalFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch) {
|
||||
// byzantine user should create two proposals and try to split the vote.
|
||||
// Avoid sending on internalMsgQueue and running consensus state.
|
||||
|
||||
// Create a new proposal block from state/txs from the mempool.
|
||||
block1, blockParts1 := cs.createProposalBlock()
|
||||
polRound, propBlockID := cs.ValidRound, types.BlockID{Hash: block1.Hash(), PartSetHeader: blockParts1.Header()}
|
||||
proposal1 := types.NewProposal(height, round, polRound, propBlockID)
|
||||
p1 := proposal1.ToProto()
|
||||
if err := cs.privValidator.SignProposal(cs.state.ChainID, p1); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
proposal1.Signature = p1.Signature
|
||||
|
||||
// some new transactions come in (this ensures that the proposals are different)
|
||||
deliverTxsRange(cs, 0, 1)
|
||||
|
||||
// Create a new proposal block from state/txs from the mempool.
|
||||
block2, blockParts2 := cs.createProposalBlock()
|
||||
polRound, propBlockID = cs.ValidRound, types.BlockID{Hash: block2.Hash(), PartSetHeader: blockParts2.Header()}
|
||||
proposal2 := types.NewProposal(height, round, polRound, propBlockID)
|
||||
p2 := proposal2.ToProto()
|
||||
if err := cs.privValidator.SignProposal(cs.state.ChainID, p2); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
proposal2.Signature = p2.Signature
|
||||
|
||||
block1Hash := block1.Hash()
|
||||
block2Hash := block2.Hash()
|
||||
|
||||
// broadcast conflicting proposals/block parts to peers
|
||||
peers := sw.Peers().List()
|
||||
t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers))
|
||||
for i, peer := range peers {
|
||||
if i < len(peers)/2 {
|
||||
go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1)
|
||||
} else {
|
||||
go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func sendProposalAndParts(
|
||||
height int64,
|
||||
round int32,
|
||||
cs *State,
|
||||
peer p2p.Peer,
|
||||
proposal *types.Proposal,
|
||||
blockHash []byte,
|
||||
parts *types.PartSet,
|
||||
) {
|
||||
// proposal
|
||||
msg := &ProposalMessage{Proposal: proposal}
|
||||
peer.Send(DataChannel, MustEncode(msg))
|
||||
|
||||
// parts
|
||||
for i := 0; i < int(parts.Total()); i++ {
|
||||
part := parts.GetPart(i)
|
||||
msg := &BlockPartMessage{
|
||||
Height: height, // This tells peer that this part applies to us.
|
||||
Round: round, // This tells peer that this part applies to us.
|
||||
Part: part,
|
||||
}
|
||||
peer.Send(DataChannel, MustEncode(msg))
|
||||
}
|
||||
|
||||
// votes
|
||||
cs.mtx.Lock()
|
||||
prevote, _ := cs.signVote(tmproto.PrevoteType, blockHash, parts.Header())
|
||||
precommit, _ := cs.signVote(tmproto.PrecommitType, blockHash, parts.Header())
|
||||
cs.mtx.Unlock()
|
||||
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{prevote}))
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// byzantine consensus reactor
|
||||
|
||||
type ByzantineReactor struct {
|
||||
service.Service
|
||||
reactor *Reactor
|
||||
}
|
||||
|
||||
func NewByzantineReactor(conR *Reactor) *ByzantineReactor {
|
||||
return &ByzantineReactor{
|
||||
Service: conR,
|
||||
reactor: conR,
|
||||
}
|
||||
}
|
||||
|
||||
func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) }
|
||||
func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() }
|
||||
func (br *ByzantineReactor) AddPeer(peer p2p.Peer) {
|
||||
if !br.reactor.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
// Create peerState for peer
|
||||
peerState := NewPeerState(peer).SetLogger(br.reactor.Logger)
|
||||
peer.Set(types.PeerStateKey, peerState)
|
||||
|
||||
// Send our state to peer.
|
||||
// If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus().
|
||||
if !br.reactor.waitSync {
|
||||
br.reactor.sendNewRoundStepMessage(peer)
|
||||
}
|
||||
}
|
||||
func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
br.reactor.RemovePeer(peer, reason)
|
||||
}
|
||||
func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
br.reactor.Receive(chID, peer, msgBytes)
|
||||
}
|
||||
func (br *ByzantineReactor) InitPeer(peer p2p.Peer) p2p.Peer { return peer }
|
||||
@@ -1,100 +0,0 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/bytes"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
//----------------------------------------------
|
||||
// byzantine failures
|
||||
|
||||
// one byz val sends a precommit for a random block at each height
|
||||
// Ensure a testnet makes blocks
|
||||
func TestReactorInvalidPrecommit(t *testing.T) {
|
||||
N := 4
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
t.Cleanup(cleanup)
|
||||
|
||||
for i := 0; i < 4; i++ {
|
||||
ticker := NewTimeoutTicker()
|
||||
ticker.SetLogger(css[i].Logger)
|
||||
css[i].SetTimeoutTicker(ticker)
|
||||
|
||||
}
|
||||
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
|
||||
|
||||
// this val sends a random precommit at each height
|
||||
byzValIdx := 0
|
||||
byzVal := css[byzValIdx]
|
||||
byzR := reactors[byzValIdx]
|
||||
|
||||
// update the doPrevote function to just send a valid precommit for a random block
|
||||
// and otherwise disable the priv validator
|
||||
byzVal.mtx.Lock()
|
||||
pv := byzVal.privValidator
|
||||
byzVal.doPrevote = func(height int64, round int32) {
|
||||
invalidDoPrevoteFunc(t, height, round, byzVal, byzR.Switch, pv)
|
||||
}
|
||||
byzVal.mtx.Unlock()
|
||||
t.Cleanup(func() { stopConsensusNet(log.TestingLogger(), reactors, eventBuses) })
|
||||
|
||||
// wait for a bunch of blocks
|
||||
// TODO: make this tighter by ensuring the halt happens by block 2
|
||||
for i := 0; i < 10; i++ {
|
||||
timeoutWaitGroup(t, N, func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
}, css)
|
||||
}
|
||||
}
|
||||
|
||||
func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, sw *p2p.Switch, pv types.PrivValidator) {
|
||||
// routine to:
|
||||
// - precommit for a random block
|
||||
// - send precommit to all peers
|
||||
// - disable privValidator (so we don't do normal precommits)
|
||||
go func() {
|
||||
cs.mtx.Lock()
|
||||
cs.privValidator = pv
|
||||
pubKey, err := cs.privValidator.GetPubKey()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
addr := pubKey.Address()
|
||||
valIndex, _ := cs.Validators.GetByAddress(addr)
|
||||
|
||||
// precommit a random block
|
||||
blockHash := bytes.HexBytes(tmrand.Bytes(32))
|
||||
precommit := &types.Vote{
|
||||
ValidatorAddress: addr,
|
||||
ValidatorIndex: valIndex,
|
||||
Height: cs.Height,
|
||||
Round: cs.Round,
|
||||
Timestamp: cs.voteTime(),
|
||||
Type: tmproto.PrecommitType,
|
||||
BlockID: types.BlockID{
|
||||
Hash: blockHash,
|
||||
PartSetHeader: types.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}},
|
||||
}
|
||||
p := precommit.ToProto()
|
||||
err = cs.privValidator.SignVote(cs.state.ChainID, p)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
precommit.Signature = p.Signature
|
||||
cs.privValidator = nil // disable priv val so we don't do normal votes
|
||||
cs.mtx.Unlock()
|
||||
|
||||
peers := sw.Peers().List()
|
||||
for _, peer := range peers {
|
||||
cs.Logger.Info("Sending bad vote", "block", blockHash, "peer", peer)
|
||||
peer.Send(VoteChannel, MustEncode(&VoteMessage{precommit}))
|
||||
}
|
||||
}()
|
||||
}
|
||||
1726 consensus/reactor.go
File diff suppressed because it is too large
@@ -1,974 +0,0 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"runtime"
|
||||
"runtime/pprof"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/mock"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cstypes "github.com/tendermint/tendermint/consensus/types"
|
||||
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
"github.com/tendermint/tendermint/libs/bits"
|
||||
"github.com/tendermint/tendermint/libs/bytes"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
mempl "github.com/tendermint/tendermint/mempool"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
p2pmock "github.com/tendermint/tendermint/p2p/mock"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
statemocks "github.com/tendermint/tendermint/state/mocks"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
//----------------------------------------------
|
||||
// in-process testnets
|
||||
|
||||
var defaultTestTime = time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
|
||||
|
||||
func startConsensusNet(t *testing.T, css []*State, n int) (
|
||||
[]*Reactor,
|
||||
[]types.Subscription,
|
||||
[]*types.EventBus,
|
||||
) {
|
||||
reactors := make([]*Reactor, n)
|
||||
blocksSubs := make([]types.Subscription, 0)
|
||||
eventBuses := make([]*types.EventBus, n)
|
||||
for i := 0; i < n; i++ {
|
||||
/*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info")
|
||||
if err != nil { t.Fatal(err)}*/
|
||||
reactors[i] = NewReactor(css[i], true) // so we dont start the consensus states
|
||||
reactors[i].SetLogger(css[i].Logger)
|
||||
|
||||
// eventBus is already started with the cs
|
||||
eventBuses[i] = css[i].eventBus
|
||||
reactors[i].SetEventBus(eventBuses[i])
|
||||
|
||||
blocksSub, err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
|
||||
require.NoError(t, err)
|
||||
blocksSubs = append(blocksSubs, blocksSub)
|
||||
|
||||
if css[i].state.LastBlockHeight == 0 { // simulate handle initChain in handshake
|
||||
if err := css[i].blockExec.Store().Save(css[i].state); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
// make connected switches and start all reactors
|
||||
p2p.MakeConnectedSwitches(config.P2P, n, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("CONSENSUS", reactors[i])
|
||||
s.SetLogger(reactors[i].conS.Logger.With("module", "p2p"))
|
||||
return s
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
// now that everyone is connected, start the state machines
|
||||
// If we started the state machines before everyone was connected,
|
||||
// we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors
|
||||
// TODO: is this still true with new pubsub?
|
||||
for i := 0; i < n; i++ {
|
||||
s := reactors[i].conS.GetState()
|
||||
reactors[i].SwitchToConsensus(s, false)
|
||||
}
|
||||
return reactors, blocksSubs, eventBuses
|
||||
}
|
||||
|
||||
func stopConsensusNet(logger log.Logger, reactors []*Reactor, eventBuses []*types.EventBus) {
|
||||
logger.Info("stopConsensusNet", "n", len(reactors))
|
||||
for i, r := range reactors {
|
||||
logger.Info("stopConsensusNet: Stopping Reactor", "i", i)
|
||||
if err := r.Switch.Stop(); err != nil {
|
||||
logger.Error("error trying to stop switch", "error", err)
|
||||
}
|
||||
}
|
||||
for i, b := range eventBuses {
|
||||
logger.Info("stopConsensusNet: Stopping eventBus", "i", i)
|
||||
if err := b.Stop(); err != nil {
|
||||
logger.Error("error trying to stop eventbus", "error", err)
|
||||
}
|
||||
}
|
||||
logger.Info("stopConsensusNet: DONE", "n", len(reactors))
|
||||
}
|
||||
|
||||
// Ensure a testnet makes blocks
|
||||
func TestReactorBasic(t *testing.T) {
|
||||
N := 4
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
// wait till everyone makes the first new block
|
||||
timeoutWaitGroup(t, N, func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
}, css)
|
||||
}
|
||||
|
||||
// Ensure we can process blocks with evidence
|
||||
func TestReactorWithEvidence(t *testing.T) {
|
||||
nValidators := 4
|
||||
testName := "consensus_reactor_test"
|
||||
tickerFunc := newMockTickerFunc(true)
|
||||
appFunc := newCounter
|
||||
|
||||
// heed the advice from https://www.sandimetz.com/blog/2016/1/20/the-wrong-abstraction
|
||||
// to unroll unwieldy abstractions. Here we duplicate the code from:
|
||||
// css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
|
||||
genDoc, privVals := randGenesisDoc(nValidators, false, 30)
|
||||
css := make([]*State, nValidators)
|
||||
logger := consensusLogger()
|
||||
for i := 0; i < nValidators; i++ {
|
||||
stateDB := dbm.NewMemDB() // each state needs its own db
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
state, _ := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
thisConfig := ResetConfig(fmt.Sprintf("%s_%d", testName, i))
|
||||
defer os.RemoveAll(thisConfig.RootDir)
|
||||
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
|
||||
app := appFunc()
|
||||
vals := types.TM2PB.ValidatorUpdates(state.Validators)
|
||||
app.InitChain(abci.RequestInitChain{Validators: vals})
|
||||
|
||||
pv := privVals[i]
|
||||
// duplicate code from:
|
||||
// css[i] = newStateWithConfig(thisConfig, state, privVals[i], app)
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
// one for mempool, one for consensus
|
||||
mtx := new(tmsync.Mutex)
|
||||
proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
|
||||
proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
|
||||
|
||||
// Make Mempool
|
||||
mempool := mempl.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
|
||||
mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
|
||||
if thisConfig.Consensus.WaitForTxs() {
|
||||
mempool.EnableTxsAvailable()
|
||||
}
|
||||
|
||||
// mock the evidence pool
|
||||
// everyone includes evidence of another double signing
|
||||
vIdx := (i + 1) % nValidators
|
||||
ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
|
||||
evpool := &statemocks.EvidencePool{}
|
||||
evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
|
||||
evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{
|
||||
ev}, int64(len(ev.Bytes())))
|
||||
evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()
|
||||
|
||||
evpool2 := sm.EmptyEvidencePool{}
|
||||
|
||||
// Make State
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool)
|
||||
cs := NewState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
|
||||
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
|
||||
cs.SetPrivValidator(pv)
|
||||
|
||||
eventBus := types.NewEventBus()
|
||||
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
|
||||
err := eventBus.Start()
|
||||
require.NoError(t, err)
|
||||
cs.SetEventBus(eventBus)
|
||||
|
||||
cs.SetTimeoutTicker(tickerFunc())
|
||||
cs.SetLogger(logger.With("validator", i, "module", "consensus"))
|
||||
|
||||
css[i] = cs
|
||||
}
|
||||
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nValidators)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
// we expect each validator, when it is the proposer, to propose one piece of evidence.
|
||||
for i := 0; i < nValidators; i++ {
|
||||
timeoutWaitGroup(t, nValidators, func(j int) {
|
||||
msg := <-blocksSubs[j].Out()
|
||||
block := msg.Data().(types.EventDataNewBlock).Block
|
||||
assert.Len(t, block.Evidence.Evidence, 1)
|
||||
}, css)
|
||||
}
|
||||
}
|
||||
|
||||
//------------------------------------
|
||||
|
||||
// Ensure a testnet makes blocks when there are txs
|
||||
func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
|
||||
N := 4
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter,
|
||||
func(c *cfg.Config) {
|
||||
c.Consensus.CreateEmptyBlocks = false
|
||||
})
|
||||
defer cleanup()
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
// send a tx
|
||||
if err := assertMempool(css[3].txNotifier).CheckTx([]byte{1, 2, 3}, nil, mempl.TxInfo{}); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
// wait till everyone makes the first new block
|
||||
timeoutWaitGroup(t, N, func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
}, css)
|
||||
}
|
||||
|
||||
func TestReactorReceiveDoesNotPanicIfAddPeerHasntBeenCalledYet(t *testing.T) {
|
||||
N := 1
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, _, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
peer = p2pmock.NewPeer(nil)
|
||||
msg = MustEncode(&HasVoteMessage{Height: 1,
|
||||
Round: 1, Index: 1, Type: tmproto.PrevoteType})
|
||||
)
|
||||
|
||||
reactor.InitPeer(peer)
|
||||
|
||||
// simulate switch calling Receive before AddPeer
|
||||
assert.NotPanics(t, func() {
|
||||
reactor.Receive(StateChannel, peer, msg)
|
||||
reactor.AddPeer(peer)
|
||||
})
|
||||
}
|
||||
|
||||
func TestReactorReceivePanicsIfInitPeerHasntBeenCalledYet(t *testing.T) {
|
||||
N := 1
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, _, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
var (
|
||||
reactor = reactors[0]
|
||||
peer = p2pmock.NewPeer(nil)
|
||||
msg = MustEncode(&HasVoteMessage{Height: 1,
|
||||
Round: 1, Index: 1, Type: tmproto.PrevoteType})
|
||||
)
|
||||
|
||||
// we should call InitPeer here
|
||||
|
||||
// simulate switch calling Receive before AddPeer
|
||||
assert.Panics(t, func() {
|
||||
reactor.Receive(StateChannel, peer, msg)
|
||||
})
|
||||
}
|
||||
|
||||
// Test we record stats about votes and block parts from other peers.
|
||||
func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
|
||||
N := 4
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter)
|
||||
defer cleanup()
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
// wait till everyone makes the first new block
|
||||
timeoutWaitGroup(t, N, func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
}, css)
|
||||
|
||||
// Get peer
|
||||
peer := reactors[1].Switch.Peers().List()[0]
|
||||
// Get peer state
|
||||
ps := peer.Get(types.PeerStateKey).(*PeerState)
|
||||
|
||||
assert.Equal(t, true, ps.VotesSent() > 0, "number of votes sent should have increased")
|
||||
assert.Equal(t, true, ps.BlockPartsSent() > 0, "number of block parts sent should have increased")
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------
|
||||
// ensure we can make blocks despite cycling a validator set
|
||||
|
||||
func TestReactorVotingPowerChange(t *testing.T) {
|
||||
nVals := 4
|
||||
logger := log.TestingLogger()
|
||||
css, cleanup := randConsensusNet(
|
||||
nVals,
|
||||
"consensus_voting_power_changes_test",
|
||||
newMockTickerFunc(true),
|
||||
newPersistentKVStore)
|
||||
defer cleanup()
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nVals)
|
||||
defer stopConsensusNet(logger, reactors, eventBuses)
|
||||
|
||||
// map of active validators
|
||||
activeVals := make(map[string]struct{})
|
||||
for i := 0; i < nVals; i++ {
|
||||
pubKey, err := css[i].privValidator.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
addr := pubKey.Address()
|
||||
activeVals[string(addr)] = struct{}{}
|
||||
}
|
||||
|
||||
// wait till everyone makes block 1
|
||||
timeoutWaitGroup(t, nVals, func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
}, css)
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
logger.Debug("---------------------------- Testing changing the voting power of one validator a few times")
|
||||
|
||||
val1PubKey, err := css[0].privValidator.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
|
||||
require.NoError(t, err)
|
||||
updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
|
||||
previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
|
||||
waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
|
||||
|
||||
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
|
||||
t.Fatalf(
|
||||
"expected voting power to change (before: %d, after: %d)",
|
||||
previousTotalVotingPower,
|
||||
css[0].GetRoundState().LastValidators.TotalVotingPower())
|
||||
}
|
||||
|
||||
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2)
|
||||
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
|
||||
waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
|
||||
|
||||
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
|
||||
t.Fatalf(
|
||||
"expected voting power to change (before: %d, after: %d)",
|
||||
previousTotalVotingPower,
|
||||
css[0].GetRoundState().LastValidators.TotalVotingPower())
|
||||
}
|
||||
|
||||
updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26)
|
||||
previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
|
||||
waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx)
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
|
||||
waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css)
|
||||
|
||||
if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
|
||||
t.Fatalf(
|
||||
"expected voting power to change (before: %d, after: %d)",
|
||||
previousTotalVotingPower,
|
||||
css[0].GetRoundState().LastValidators.TotalVotingPower())
|
||||
}
|
||||
}
|
||||
|
||||
func TestReactorValidatorSetChanges(t *testing.T) {
|
||||
nPeers := 7
|
||||
nVals := 4
|
||||
css, _, _, cleanup := randConsensusNetWithPeers(
|
||||
nVals,
|
||||
nPeers,
|
||||
"consensus_val_set_changes_test",
|
||||
newMockTickerFunc(true),
|
||||
newPersistentKVStoreWithPath)
|
||||
|
||||
defer cleanup()
|
||||
logger := log.TestingLogger()
|
||||
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, nPeers)
|
||||
defer stopConsensusNet(logger, reactors, eventBuses)
|
||||
|
||||
// map of active validators
|
||||
activeVals := make(map[string]struct{})
|
||||
for i := 0; i < nVals; i++ {
|
||||
pubKey, err := css[i].privValidator.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
activeVals[string(pubKey.Address())] = struct{}{}
|
||||
}
|
||||
|
||||
// wait till everyone makes block 1
|
||||
timeoutWaitGroup(t, nPeers, func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
}, css)
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
logger.Info("---------------------------- Testing adding one validator")
|
||||
|
||||
newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
|
||||
assert.NoError(t, err)
|
||||
valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
|
||||
assert.NoError(t, err)
|
||||
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
|
||||
|
||||
// wait till everyone makes block 2
|
||||
// ensure the commit includes all validators
|
||||
// send newValTx to change vals in block 3
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)
|
||||
|
||||
// wait till everyone makes block 3.
|
||||
// it includes the commit for block 2, which is by the original validator set
|
||||
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx1)
|
||||
|
||||
// wait till everyone makes block 4.
|
||||
// it includes the commit for block 3, which is by the original validator set
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
|
||||
|
||||
// the commits for block 4 should be with the updated validator set
|
||||
activeVals[string(newValidatorPubKey1.Address())] = struct{}{}
|
||||
|
||||
// wait till everyone makes block 5
|
||||
// it includes the commit for block 4, which should have the updated validator set
|
||||
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
logger.Info("---------------------------- Testing changing the voting power of one validator")
|
||||
|
||||
updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
|
||||
require.NoError(t, err)
|
||||
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
|
||||
previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower()
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
|
||||
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1)
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
|
||||
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
|
||||
|
||||
if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower {
|
||||
t.Errorf(
|
||||
"expected voting power to change (before: %d, after: %d)",
|
||||
previousTotalVotingPower,
|
||||
css[nVals].GetRoundState().LastValidators.TotalVotingPower())
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
logger.Info("---------------------------- Testing adding two validators at once")
|
||||
|
||||
newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
|
||||
require.NoError(t, err)
|
||||
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
|
||||
|
||||
newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
|
||||
require.NoError(t, err)
|
||||
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
|
||||
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, newValidatorTx2, newValidatorTx3)
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
|
||||
activeVals[string(newValidatorPubKey2.Address())] = struct{}{}
|
||||
activeVals[string(newValidatorPubKey3.Address())] = struct{}{}
|
||||
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
logger.Info("---------------------------- Testing removing two validators at once")
|
||||
|
||||
removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
|
||||
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
|
||||
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
|
||||
waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, removeValidatorTx2, removeValidatorTx3)
|
||||
waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css)
|
||||
delete(activeVals, string(newValidatorPubKey2.Address()))
|
||||
delete(activeVals, string(newValidatorPubKey3.Address()))
|
||||
waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css)
|
||||
}
|
||||
|
||||
// Check we can make blocks with skip_timeout_commit=false
|
||||
func TestReactorWithTimeoutCommit(t *testing.T) {
|
||||
N := 4
|
||||
css, cleanup := randConsensusNet(N, "consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter)
|
||||
defer cleanup()
|
||||
// override default SkipTimeoutCommit == true for tests
|
||||
for i := 0; i < N; i++ {
|
||||
css[i].config.SkipTimeoutCommit = false
|
||||
}
|
||||
|
||||
reactors, blocksSubs, eventBuses := startConsensusNet(t, css, N-1)
|
||||
defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses)
|
||||
|
||||
// wait till everyone makes the first new block
|
||||
timeoutWaitGroup(t, N-1, func(j int) {
|
||||
<-blocksSubs[j].Out()
|
||||
}, css)
|
||||
}
|
||||
|
||||
func waitForAndValidateBlock(
|
||||
t *testing.T,
|
||||
n int,
|
||||
activeVals map[string]struct{},
|
||||
blocksSubs []types.Subscription,
|
||||
css []*State,
|
||||
txs ...[]byte,
|
||||
) {
|
||||
timeoutWaitGroup(t, n, func(j int) {
|
||||
css[j].Logger.Debug("waitForAndValidateBlock")
|
||||
msg := <-blocksSubs[j].Out()
|
||||
newBlock := msg.Data().(types.EventDataNewBlock).Block
|
||||
css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height)
|
||||
err := validateBlock(newBlock, activeVals)
|
||||
assert.Nil(t, err)
|
||||
for _, tx := range txs {
|
||||
err := assertMempool(css[j].txNotifier).CheckTx(tx, nil, mempl.TxInfo{})
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
}, css)
|
||||
}
|
||||
|
||||
func waitForAndValidateBlockWithTx(
|
||||
t *testing.T,
|
||||
n int,
|
||||
activeVals map[string]struct{},
|
||||
blocksSubs []types.Subscription,
|
||||
css []*State,
|
||||
txs ...[]byte,
|
||||
) {
|
||||
timeoutWaitGroup(t, n, func(j int) {
|
||||
ntxs := 0
|
||||
BLOCK_TX_LOOP:
|
||||
for {
|
||||
css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs)
|
||||
msg := <-blocksSubs[j].Out()
|
||||
newBlock := msg.Data().(types.EventDataNewBlock).Block
|
||||
css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height)
|
||||
err := validateBlock(newBlock, activeVals)
|
||||
assert.Nil(t, err)
|
||||
|
||||
// check that txs match the txs we're waiting for.
|
||||
// note they could be spread over multiple blocks,
|
||||
// but they should be in order.
|
||||
for _, tx := range newBlock.Data.Txs {
|
||||
assert.EqualValues(t, txs[ntxs], tx)
|
||||
ntxs++
|
||||
}
|
||||
|
||||
if ntxs == len(txs) {
|
||||
break BLOCK_TX_LOOP
|
||||
}
|
||||
}
|
||||
|
||||
}, css)
|
||||
}
|
||||
|
||||
func waitForBlockWithUpdatedValsAndValidateIt(
|
||||
t *testing.T,
|
||||
n int,
|
||||
updatedVals map[string]struct{},
|
||||
blocksSubs []types.Subscription,
|
||||
css []*State,
|
||||
) {
|
||||
timeoutWaitGroup(t, n, func(j int) {
|
||||
|
||||
var newBlock *types.Block
|
||||
LOOP:
|
||||
for {
|
||||
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt")
|
||||
msg := <-blocksSubs[j].Out()
|
||||
newBlock = msg.Data().(types.EventDataNewBlock).Block
|
||||
if newBlock.LastCommit.Size() == len(updatedVals) {
|
||||
css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height)
|
||||
break LOOP
|
||||
} else {
|
||||
css[j].Logger.Debug(
|
||||
"waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping",
|
||||
"height",
|
||||
newBlock.Height)
|
||||
}
|
||||
}
|
||||
|
||||
err := validateBlock(newBlock, updatedVals)
|
||||
assert.Nil(t, err)
|
||||
}, css)
|
||||
}
|
||||
|
||||
// expects high synchrony!
|
||||
func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
|
||||
if block.LastCommit.Size() != len(activeVals) {
|
||||
return fmt.Errorf(
|
||||
"commit size doesn't match number of active validators. Got %d, expected %d",
|
||||
block.LastCommit.Size(),
|
||||
len(activeVals))
|
||||
}
|
||||
|
||||
for _, commitSig := range block.LastCommit.Signatures {
|
||||
if _, ok := activeVals[string(commitSig.ValidatorAddress)]; !ok {
|
||||
return fmt.Errorf("found vote for inactive validator %X", commitSig.ValidatorAddress)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*State) {
|
||||
wg := new(sync.WaitGroup)
|
||||
wg.Add(n)
|
||||
for i := 0; i < n; i++ {
|
||||
go func(j int) {
|
||||
f(j)
|
||||
wg.Done()
|
||||
}(i)
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(done)
|
||||
}()
|
||||
|
||||
// we're running many nodes in-process, possibly in a virtual machine,
// and spewing debug messages - making a block could take a while.
|
||||
timeout := time.Second * 120
|
||||
|
||||
select {
|
||||
case <-done:
|
||||
case <-time.After(timeout):
|
||||
for i, cs := range css {
|
||||
t.Log("#################")
|
||||
t.Log("Validator", i)
|
||||
t.Log(cs.GetRoundState())
|
||||
t.Log("")
|
||||
}
|
||||
os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n"))
|
||||
err := pprof.Lookup("goroutine").WriteTo(os.Stdout, 1)
|
||||
require.NoError(t, err)
|
||||
capture()
|
||||
panic("Timed out waiting for all validators to commit a block")
|
||||
}
|
||||
}
|
||||
|
||||
func capture() {
|
||||
trace := make([]byte, 10240000)
|
||||
count := runtime.Stack(trace, true)
|
||||
fmt.Printf("Stack of %d bytes: %s\n", count, trace)
|
||||
}
|
||||
|
||||
//-------------------------------------------------------------
|
||||
// Ensure basic validation of structs is functioning
|
||||
|
||||
func TestNewRoundStepMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct { // nolint: maligned
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageLastCommitRound int32
|
||||
messageHeight int64
|
||||
testName string
|
||||
messageStep cstypes.RoundStepType
|
||||
}{
|
||||
{false, 0, 0, 0, "Valid Message", cstypes.RoundStepNewHeight},
|
||||
{true, -1, 0, 0, "Negative round", cstypes.RoundStepNewHeight},
|
||||
{true, 0, 0, -1, "Negative height", cstypes.RoundStepNewHeight},
|
||||
{true, 0, 0, 0, "Invalid Step", cstypes.RoundStepCommit + 1},
|
||||
// The following cases will be handled by ValidateHeight
|
||||
{false, 0, 0, 1, "H == 1 but LCR != -1 ", cstypes.RoundStepNewHeight},
|
||||
{false, 0, -1, 2, "H > 1 but LCR < 0", cstypes.RoundStepNewHeight},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
message := NewRoundStepMessage{
|
||||
Height: tc.messageHeight,
|
||||
Round: tc.messageRound,
|
||||
Step: tc.messageStep,
|
||||
LastCommitRound: tc.messageLastCommitRound,
|
||||
}
|
||||
|
||||
err := message.ValidateBasic()
|
||||
if tc.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewRoundStepMessageValidateHeight(t *testing.T) {
|
||||
initialHeight := int64(10)
|
||||
testCases := []struct { // nolint: maligned
|
||||
expectErr bool
|
||||
messageLastCommitRound int32
|
||||
messageHeight int64
|
||||
testName string
|
||||
}{
|
||||
{false, 0, 11, "Valid Message"},
|
||||
{true, 0, -1, "Negative height"},
|
||||
{true, 0, 0, "Zero height"},
|
||||
{true, 0, 10, "Initial height but LCR != -1 "},
|
||||
{true, -1, 11, "Normal height but LCR < 0"},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
message := NewRoundStepMessage{
|
||||
Height: tc.messageHeight,
|
||||
Round: 0,
|
||||
Step: cstypes.RoundStepNewHeight,
|
||||
LastCommitRound: tc.messageLastCommitRound,
|
||||
}
|
||||
|
||||
err := message.ValidateHeight(initialHeight)
|
||||
if tc.expectErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewValidBlockMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
malleateFn func(*NewValidBlockMessage)
|
||||
expErr string
|
||||
}{
|
||||
{func(msg *NewValidBlockMessage) {}, ""},
|
||||
{func(msg *NewValidBlockMessage) { msg.Height = -1 }, "negative Height"},
|
||||
{func(msg *NewValidBlockMessage) { msg.Round = -1 }, "negative Round"},
|
||||
{
|
||||
func(msg *NewValidBlockMessage) { msg.BlockPartSetHeader.Total = 2 },
|
||||
"blockParts bit array size 1 not equal to BlockPartSetHeader.Total 2",
|
||||
},
|
||||
{
|
||||
func(msg *NewValidBlockMessage) {
|
||||
msg.BlockPartSetHeader.Total = 0
|
||||
msg.BlockParts = bits.NewBitArray(0)
|
||||
},
|
||||
"empty blockParts",
|
||||
},
|
||||
{
|
||||
func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) },
|
||||
"blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
|
||||
msg := &NewValidBlockMessage{
|
||||
Height: 1,
|
||||
Round: 0,
|
||||
BlockPartSetHeader: types.PartSetHeader{
|
||||
Total: 1,
|
||||
},
|
||||
BlockParts: bits.NewBitArray(1),
|
||||
}
|
||||
|
||||
tc.malleateFn(msg)
|
||||
err := msg.ValidateBasic()
|
||||
if tc.expErr != "" && assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), tc.expErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProposalPOLMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
malleateFn func(*ProposalPOLMessage)
|
||||
expErr string
|
||||
}{
|
||||
{func(msg *ProposalPOLMessage) {}, ""},
|
||||
{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
|
||||
{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
|
||||
{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
|
||||
{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
|
||||
"proposalPOL bit array is too big: 10001, max: 10000"},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
|
||||
msg := &ProposalPOLMessage{
|
||||
Height: 1,
|
||||
ProposalPOLRound: 1,
|
||||
ProposalPOL: bits.NewBitArray(1),
|
||||
}
|
||||
|
||||
tc.malleateFn(msg)
|
||||
err := msg.ValidateBasic()
|
||||
if tc.expErr != "" && assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), tc.expErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPartMessageValidateBasic(t *testing.T) {
|
||||
testPart := new(types.Part)
|
||||
testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
|
||||
testCases := []struct {
|
||||
testName string
|
||||
messageHeight int64
|
||||
messageRound int32
|
||||
messagePart *types.Part
|
||||
expectErr bool
|
||||
}{
|
||||
{"Valid Message", 0, 0, testPart, false},
|
||||
{"Invalid Message", -1, 0, testPart, true},
|
||||
{"Invalid Message", 0, -1, testPart, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
message := BlockPartMessage{
|
||||
Height: tc.messageHeight,
|
||||
Round: tc.messageRound,
|
||||
Part: tc.messagePart,
|
||||
}
|
||||
|
||||
assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
|
||||
message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
|
||||
message.Part.Index = 1
|
||||
|
||||
assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
}
|
||||
|
||||
func TestHasVoteMessageValidateBasic(t *testing.T) {
|
||||
const (
|
||||
validSignedMsgType tmproto.SignedMsgType = 0x01
|
||||
invalidSignedMsgType tmproto.SignedMsgType = 0x03
|
||||
)
|
||||
|
||||
testCases := []struct { // nolint: maligned
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageIndex int32
|
||||
messageHeight int64
|
||||
testName string
|
||||
messageType tmproto.SignedMsgType
|
||||
}{
|
||||
{false, 0, 0, 0, "Valid Message", validSignedMsgType},
|
||||
{true, -1, 0, 0, "Invalid Message", validSignedMsgType},
|
||||
{true, 0, -1, 0, "Invalid Message", validSignedMsgType},
|
||||
{true, 0, 0, 0, "Invalid Message", invalidSignedMsgType},
|
||||
{true, 0, 0, -1, "Invalid Message", validSignedMsgType},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
message := HasVoteMessage{
|
||||
Height: tc.messageHeight,
|
||||
Round: tc.messageRound,
|
||||
Type: tc.messageType,
|
||||
Index: tc.messageIndex,
|
||||
}
|
||||
|
||||
assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
|
||||
const (
|
||||
validSignedMsgType tmproto.SignedMsgType = 0x01
|
||||
invalidSignedMsgType tmproto.SignedMsgType = 0x03
|
||||
)
|
||||
|
||||
validBlockID := types.BlockID{}
|
||||
invalidBlockID := types.BlockID{
|
||||
Hash: bytes.HexBytes{},
|
||||
PartSetHeader: types.PartSetHeader{
|
||||
Total: 1,
|
||||
Hash: []byte{0},
|
||||
},
|
||||
}
|
||||
|
||||
testCases := []struct { // nolint: maligned
|
||||
expectErr bool
|
||||
messageRound int32
|
||||
messageHeight int64
|
||||
testName string
|
||||
messageType tmproto.SignedMsgType
|
||||
messageBlockID types.BlockID
|
||||
}{
|
||||
{false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
|
||||
{true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
|
||||
{true, 0, -1, "Invalid Message", validSignedMsgType, validBlockID},
|
||||
{true, 0, 0, "Invalid Message", invalidSignedMsgType, validBlockID},
|
||||
{true, 0, 0, "Invalid Message", validSignedMsgType, invalidBlockID},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
message := VoteSetMaj23Message{
|
||||
Height: tc.messageHeight,
|
||||
Round: tc.messageRound,
|
||||
Type: tc.messageType,
|
||||
BlockID: tc.messageBlockID,
|
||||
}
|
||||
|
||||
assert.Equal(t, tc.expectErr, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
malleateFn func(*VoteSetBitsMessage)
|
||||
expErr string
|
||||
}{
|
||||
{func(msg *VoteSetBitsMessage) {}, ""},
|
||||
{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
|
||||
{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
|
||||
{func(msg *VoteSetBitsMessage) {
|
||||
msg.BlockID = types.BlockID{
|
||||
Hash: bytes.HexBytes{},
|
||||
PartSetHeader: types.PartSetHeader{
|
||||
Total: 1,
|
||||
Hash: []byte{0},
|
||||
},
|
||||
}
|
||||
}, "wrong BlockID: wrong PartSetHeader: wrong Hash:"},
|
||||
{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
|
||||
"votes bit array is too big: 10001, max: 10000"},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) {
|
||||
msg := &VoteSetBitsMessage{
|
||||
Height: 1,
|
||||
Round: 0,
|
||||
Type: 0x01,
|
||||
Votes: bits.NewBitArray(1),
|
||||
BlockID: types.BlockID{},
|
||||
}
|
||||
|
||||
tc.malleateFn(msg)
|
||||
err := msg.ValidateBasic()
|
||||
if tc.expErr != "" && assert.Error(t, err) {
|
||||
assert.Contains(t, err.Error(), tc.expErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,534 +0,0 @@
|
||||
package consensus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"hash/crc32"
|
||||
"io"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/crypto/merkle"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var crc32c = crc32.MakeTable(crc32.Castagnoli)
|
||||
|
||||
// Functionality to replay blocks and messages on recovery from a crash.
|
||||
// There are two general failure scenarios:
|
||||
//
|
||||
// 1. failure during consensus
|
||||
// 2. failure while applying the block
|
||||
//
|
||||
// The former is handled by the WAL, the latter by the proxyApp Handshake on
|
||||
// restart, which ultimately hands off the work to the WAL.
|
||||
|
||||
//-----------------------------------------
|
||||
// 1. Recover from failure during consensus
|
||||
// (by replaying messages from the WAL)
|
||||
//-----------------------------------------
|
||||
|
||||
// Unmarshal and apply a single message to the consensus state as if it were
|
||||
// received in receiveRoutine. Lines that start with "#" are ignored.
|
||||
// NOTE: receiveRoutine should not be running.
|
||||
func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error {
|
||||
// Skip meta messages which exist for demarcating boundaries.
|
||||
if _, ok := msg.Msg.(EndHeightMessage); ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
// for logging
|
||||
switch m := msg.Msg.(type) {
|
||||
case types.EventDataRoundState:
|
||||
cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
|
||||
// these are playback checks
|
||||
ticker := time.After(time.Second * 2)
|
||||
if newStepSub != nil {
|
||||
select {
|
||||
case stepMsg := <-newStepSub.Out():
|
||||
m2 := stepMsg.Data().(types.EventDataRoundState)
|
||||
if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
|
||||
return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m)
|
||||
}
|
||||
case <-newStepSub.Cancelled():
|
||||
return fmt.Errorf("failed to read off newStepSub.Out(). newStepSub was cancelled")
|
||||
case <-ticker:
|
||||
return fmt.Errorf("failed to read off newStepSub.Out()")
|
||||
}
|
||||
}
|
||||
case msgInfo:
|
||||
peerID := m.PeerID
|
||||
if peerID == "" {
|
||||
peerID = "local"
|
||||
}
|
||||
switch msg := m.Msg.(type) {
|
||||
case *ProposalMessage:
|
||||
p := msg.Proposal
|
||||
cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header",
|
||||
p.BlockID.PartSetHeader, "pol", p.POLRound, "peer", peerID)
|
||||
case *BlockPartMessage:
|
||||
cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID)
|
||||
case *VoteMessage:
|
||||
v := msg.Vote
|
||||
cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type,
|
||||
"blockID", v.BlockID, "peer", peerID)
|
||||
}
|
||||
|
||||
cs.handleMsg(m)
|
||||
case timeoutInfo:
|
||||
cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration)
|
||||
cs.handleTimeout(m, cs.RoundState)
|
||||
default:
|
||||
return fmt.Errorf("replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Replay only those messages since the last block. `timeoutRoutine` should
|
||||
// run concurrently to read off tickChan.
|
||||
func (cs *State) catchupReplay(csHeight int64) error {
|
||||
|
||||
// Set replayMode to true so we don't log signing errors.
|
||||
cs.replayMode = true
|
||||
defer func() { cs.replayMode = false }()
|
||||
|
||||
// Ensure that #ENDHEIGHT for this height doesn't exist.
|
||||
// NOTE: This is just a sanity check. As far as we know things work fine
|
||||
// without it, and Handshake could reuse State if it weren't for
|
||||
// this check (since we can crash after writing #ENDHEIGHT).
|
||||
//
|
||||
// Ignore data corruption errors since this is a sanity check.
|
||||
gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if gr != nil {
|
||||
if err := gr.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if found {
|
||||
return fmt.Errorf("wal should not contain #ENDHEIGHT %d", csHeight)
|
||||
}
|
||||
|
||||
// Search for last height marker.
|
||||
//
|
||||
// Ignore data corruption errors in previous heights because we only care about the last height.
|
||||
if csHeight < cs.state.InitialHeight {
|
||||
return fmt.Errorf("cannot replay height %v, below initial height %v", csHeight, cs.state.InitialHeight)
|
||||
}
|
||||
endHeight := csHeight - 1
|
||||
if csHeight == cs.state.InitialHeight {
|
||||
endHeight = 0
|
||||
}
|
||||
gr, found, err = cs.wal.SearchForEndHeight(endHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true})
|
||||
if err == io.EOF {
|
||||
cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", endHeight)
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
if !found {
|
||||
return fmt.Errorf("cannot replay height %d. WAL does not contain #ENDHEIGHT for %d", csHeight, endHeight)
|
||||
}
|
||||
defer gr.Close()
|
||||
|
||||
cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight)
|
||||
|
||||
var msg *TimedWALMessage
|
||||
dec := WALDecoder{gr}
|
||||
|
||||
LOOP:
|
||||
for {
|
||||
msg, err = dec.Decode()
|
||||
switch {
|
||||
case err == io.EOF:
|
||||
break LOOP
|
||||
case IsDataCorruptionError(err):
|
||||
cs.Logger.Error("data has been corrupted in last height of consensus WAL", "err", err, "height", csHeight)
|
||||
return err
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
|
||||
// NOTE: since the priv key is set when the msgs are received,
// it will attempt to e.g. double sign, but we can just ignore it
// since the votes will be replayed and we'll get to the next step
|
||||
if err := cs.readReplayMessage(msg, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
cs.Logger.Info("Replay: Done")
|
||||
return nil
|
||||
}
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
// Parses marker lines of the form:
|
||||
// #ENDHEIGHT: 12345
|
||||
/*
|
||||
func makeHeightSearchFunc(height int64) auto.SearchFunc {
|
||||
return func(line string) (int, error) {
|
||||
line = strings.TrimRight(line, "\n")
|
||||
parts := strings.Split(line, " ")
|
||||
if len(parts) != 2 {
|
||||
return -1, errors.New("line did not have 2 parts")
|
||||
}
|
||||
i, err := strconv.Atoi(parts[1])
|
||||
if err != nil {
|
||||
return -1, errors.New("failed to parse INFO: " + err.Error())
|
||||
}
|
||||
if height < i {
|
||||
return 1, nil
|
||||
} else if height == i {
|
||||
return 0, nil
|
||||
} else {
|
||||
return -1, nil
|
||||
}
|
||||
}
|
||||
}*/
|
||||
|
||||
//---------------------------------------------------
|
||||
// 2. Recover from failure while applying the block.
|
||||
// (by handshaking with the app to figure out where
|
||||
// we were last, and using the WAL to recover there.)
|
||||
//---------------------------------------------------
|
||||
|
||||
type Handshaker struct {
|
||||
stateStore sm.Store
|
||||
initialState sm.State
|
||||
store sm.BlockStore
|
||||
eventBus types.BlockEventPublisher
|
||||
genDoc *types.GenesisDoc
|
||||
logger log.Logger
|
||||
|
||||
nBlocks int // number of blocks applied to the state
|
||||
}
|
||||
|
||||
func NewHandshaker(stateStore sm.Store, state sm.State,
|
||||
store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker {
|
||||
|
||||
return &Handshaker{
|
||||
stateStore: stateStore,
|
||||
initialState: state,
|
||||
store: store,
|
||||
eventBus: types.NopEventBus{},
|
||||
genDoc: genDoc,
|
||||
logger: log.NewNopLogger(),
|
||||
nBlocks: 0,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handshaker) SetLogger(l log.Logger) {
|
||||
h.logger = l
|
||||
}
|
||||
|
||||
// SetEventBus - sets the event bus for publishing block related events.
|
||||
// If not called, it defaults to types.NopEventBus.
|
||||
func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) {
|
||||
h.eventBus = eventBus
|
||||
}
|
||||
|
||||
// NBlocks returns the number of blocks applied to the state.
|
||||
func (h *Handshaker) NBlocks() int {
|
||||
return h.nBlocks
|
||||
}
|
||||
|
||||
// TODO: retry the handshake/replay if it fails ?
|
||||
func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
|
||||
|
||||
// Handshake is done via ABCI Info on the query conn.
|
||||
res, err := proxyApp.Query().InfoSync(context.Background(), proxy.RequestInfo)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error calling Info: %v", err)
|
||||
}
|
||||
|
||||
blockHeight := res.LastBlockHeight
|
||||
if blockHeight < 0 {
|
||||
return fmt.Errorf("got a negative last block height (%d) from the app", blockHeight)
|
||||
}
|
||||
appHash := res.LastBlockAppHash
|
||||
|
||||
h.logger.Info("ABCI Handshake App Info",
|
||||
"height", blockHeight,
|
||||
"hash", fmt.Sprintf("%X", appHash),
|
||||
"software-version", res.Version,
|
||||
"protocol-version", res.AppVersion,
|
||||
)
|
||||
|
||||
// Only set the version if there is no existing state.
|
||||
if h.initialState.LastBlockHeight == 0 {
|
||||
h.initialState.Version.Consensus.App = res.AppVersion
|
||||
}
|
||||
|
||||
// Replay blocks up to the latest in the blockstore.
|
||||
_, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error on replay: %v", err)
|
||||
}
|
||||
|
||||
h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
|
||||
"appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
|
||||
|
||||
// TODO: (on restart) replay mempool
|
||||
|
||||
return nil
|
||||
}
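
// Illustrative sketch (not part of the original diff): one way a caller could
// wire up a Handshaker before starting consensus, using only the constructor
// and setters defined above. The state store, block store, genesis doc, event
// bus and an already-started proxyApp are assumed to come from the node's
// startup code; the helper name doHandshakeSketch is hypothetical.
func doHandshakeSketch(
	stateStore sm.Store,
	state sm.State,
	blockStore sm.BlockStore,
	genDoc *types.GenesisDoc,
	eventBus types.BlockEventPublisher,
	proxyApp proxy.AppConns,
	logger log.Logger,
) error {
	h := NewHandshaker(stateStore, state, blockStore, genDoc)
	h.SetLogger(logger)
	h.SetEventBus(eventBus)
	if err := h.Handshake(proxyApp); err != nil {
		return fmt.Errorf("error during handshake: %v", err)
	}
	logger.Info("Handshake complete", "blocksReplayed", h.NBlocks())
	return nil
}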
|
||||
|
||||
// ReplayBlocks replays all blocks since appBlockHeight and ensures the result
|
||||
// matches the current state.
|
||||
// Returns the final AppHash or an error.
|
||||
func (h *Handshaker) ReplayBlocks(
|
||||
state sm.State,
|
||||
appHash []byte,
|
||||
appBlockHeight int64,
|
||||
proxyApp proxy.AppConns,
|
||||
) ([]byte, error) {
|
||||
storeBlockBase := h.store.Base()
|
||||
storeBlockHeight := h.store.Height()
|
||||
stateBlockHeight := state.LastBlockHeight
|
||||
h.logger.Info(
|
||||
"ABCI Replay Blocks",
|
||||
"appHeight",
|
||||
appBlockHeight,
|
||||
"storeHeight",
|
||||
storeBlockHeight,
|
||||
"stateHeight",
|
||||
stateBlockHeight)
|
||||
|
||||
// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
|
||||
if appBlockHeight == 0 {
|
||||
validators := make([]*types.Validator, len(h.genDoc.Validators))
|
||||
for i, val := range h.genDoc.Validators {
|
||||
validators[i] = types.NewValidator(val.PubKey, val.Power)
|
||||
}
|
||||
validatorSet := types.NewValidatorSet(validators)
|
||||
nextVals := types.TM2PB.ValidatorUpdates(validatorSet)
|
||||
csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams)
|
||||
req := abci.RequestInitChain{
|
||||
Time: h.genDoc.GenesisTime,
|
||||
ChainId: h.genDoc.ChainID,
|
||||
InitialHeight: h.genDoc.InitialHeight,
|
||||
ConsensusParams: csParams,
|
||||
Validators: nextVals,
|
||||
AppStateBytes: h.genDoc.AppState,
|
||||
}
|
||||
res, err := proxyApp.Consensus().InitChainSync(context.Background(), req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
appHash = res.AppHash
|
||||
|
||||
if stateBlockHeight == 0 { // we only update state when we are in initial state
|
||||
// If the app did not return an app hash, we keep the one set from the genesis doc in
|
||||
// the state. We don't set appHash since we don't want the genesis doc app hash
|
||||
// recorded in the genesis block. We should probably just remove GenesisDoc.AppHash.
|
||||
if len(res.AppHash) > 0 {
|
||||
state.AppHash = res.AppHash
|
||||
}
|
||||
// If the app returned validators or consensus params, update the state.
|
||||
if len(res.Validators) > 0 {
|
||||
vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state.Validators = types.NewValidatorSet(vals)
|
||||
state.NextValidators = types.NewValidatorSet(vals).CopyIncrementProposerPriority(1)
|
||||
} else if len(h.genDoc.Validators) == 0 {
|
||||
// If validator set is not set in genesis and still empty after InitChain, exit.
|
||||
return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain")
|
||||
}
|
||||
|
||||
if res.ConsensusParams != nil {
|
||||
state.ConsensusParams = types.UpdateConsensusParams(state.ConsensusParams, res.ConsensusParams)
|
||||
state.Version.Consensus.App = state.ConsensusParams.Version.AppVersion
|
||||
}
|
||||
// We update the last results hash with the empty hash, to conform with RFC-6962.
|
||||
state.LastResultsHash = merkle.HashFromByteSlices(nil)
|
||||
if err := h.stateStore.Save(state); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// First handle edge cases and constraints on the storeBlockHeight and storeBlockBase.
|
||||
switch {
|
||||
case storeBlockHeight == 0:
|
||||
assertAppHashEqualsOneFromState(appHash, state)
|
||||
return appHash, nil
|
||||
|
||||
case appBlockHeight == 0 && state.InitialHeight < storeBlockBase:
|
||||
// the app has no state, and the block store is truncated above the initial height
|
||||
return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase}
|
||||
|
||||
case appBlockHeight > 0 && appBlockHeight < storeBlockBase-1:
|
||||
// the app is too far behind the truncated store (it can be 1 behind, since we replay the next block)
|
||||
return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase}
|
||||
|
||||
case storeBlockHeight < appBlockHeight:
|
||||
// the app should never be ahead of the store (but this is under app's control)
|
||||
return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight}
|
||||
|
||||
case storeBlockHeight < stateBlockHeight:
|
||||
// the state should never be ahead of the store (this is under tendermint's control)
|
||||
panic(fmt.Sprintf("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight))
|
||||
|
||||
case storeBlockHeight > stateBlockHeight+1:
|
||||
// store should be at most one ahead of the state (this is under tendermint's control)
|
||||
panic(fmt.Sprintf("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1))
|
||||
}
|
||||
|
||||
var err error
|
||||
// Now either store is equal to state, or one ahead.
|
||||
// For each, consider all cases of where the app could be, given app <= store
|
||||
if storeBlockHeight == stateBlockHeight {
|
||||
// Tendermint ran Commit and saved the state.
|
||||
// Either the app is asking for replay, or we're all synced up.
|
||||
if appBlockHeight < storeBlockHeight {
|
||||
// the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store)
|
||||
return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false)
|
||||
|
||||
} else if appBlockHeight == storeBlockHeight {
|
||||
// We're good!
|
||||
assertAppHashEqualsOneFromState(appHash, state)
|
||||
return appHash, nil
|
||||
}
|
||||
|
||||
} else if storeBlockHeight == stateBlockHeight+1 {
|
||||
// We saved the block in the store but haven't updated the state,
|
||||
// so we'll need to replay a block using the WAL.
|
||||
switch {
|
||||
case appBlockHeight < stateBlockHeight:
|
||||
// the app is further behind than it should be, so replay blocks
|
||||
// but leave the last block to go through the WAL
|
||||
return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true)
|
||||
|
||||
case appBlockHeight == stateBlockHeight:
|
||||
// We haven't run Commit (both the state and app are one block behind),
|
||||
// so replayBlock with the real app.
|
||||
// NOTE: We could instead use the cs.WAL on cs.Start,
|
||||
// but we'd have to allow the WAL to replay a block that wrote its #ENDHEIGHT
|
||||
h.logger.Info("Replay last block using real app")
|
||||
state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
|
||||
return state.AppHash, err
|
||||
|
||||
case appBlockHeight == storeBlockHeight:
|
||||
// We ran Commit, but didn't save the state, so replayBlock with mock app.
|
||||
abciResponses, err := h.stateStore.LoadABCIResponses(storeBlockHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
mockApp := newMockProxyApp(appHash, abciResponses)
|
||||
h.logger.Info("Replay last block using mock app")
|
||||
state, err = h.replayBlock(state, storeBlockHeight, mockApp)
|
||||
return state.AppHash, err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
panic(fmt.Sprintf("uncovered case! appHeight: %d, storeHeight: %d, stateHeight: %d",
|
||||
appBlockHeight, storeBlockHeight, stateBlockHeight))
|
||||
}
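
// Worked example (illustrative, not part of the original diff): suppose the
// block store holds blocks up to height 10, the state was last saved at
// height 9 (storeBlockHeight == stateBlockHeight+1), and the app reports
// LastBlockHeight 7. ReplayBlocks then takes the appBlockHeight < stateBlockHeight
// branch: blocks 8 and 9 are replayed with sm.ExecCommitBlock (no state
// mutation), and block 10 is replayed with h.replayBlock so the resulting
// state is saved, leaving the app, store and state all at height 10.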
|
||||
|
||||
func (h *Handshaker) replayBlocks(
|
||||
state sm.State,
|
||||
proxyApp proxy.AppConns,
|
||||
appBlockHeight,
|
||||
storeBlockHeight int64,
|
||||
mutateState bool) ([]byte, error) {
|
||||
// App is further behind than it should be, so we need to replay blocks.
|
||||
// We replay all blocks from appBlockHeight+1.
|
||||
//
|
||||
// Note that we don't have an old version of the state,
|
||||
// so we by-pass state validation/mutation using sm.ExecCommitBlock.
|
||||
// This also means we won't be saving validator sets if they change during this period.
|
||||
// TODO: Load the historical information to fix this and just use state.ApplyBlock
|
||||
//
|
||||
// If mutateState == true, the final block is replayed with h.replayBlock()
|
||||
|
||||
var appHash []byte
|
||||
var err error
|
||||
finalBlock := storeBlockHeight
|
||||
if mutateState {
|
||||
finalBlock--
|
||||
}
|
||||
firstBlock := appBlockHeight + 1
|
||||
if firstBlock == 1 {
|
||||
firstBlock = state.InitialHeight
|
||||
}
|
||||
for i := firstBlock; i <= finalBlock; i++ {
|
||||
h.logger.Info("Applying block", "height", i)
|
||||
block := h.store.LoadBlock(i)
|
||||
// Extra check to ensure the app was not changed in a way it shouldn't have.
|
||||
if len(appHash) > 0 {
|
||||
assertAppHashEqualsOneFromBlock(appHash, block)
|
||||
}
|
||||
|
||||
appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateStore, h.genDoc.InitialHeight)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h.nBlocks++
|
||||
}
|
||||
|
||||
if mutateState {
|
||||
// sync the final block
|
||||
state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
appHash = state.AppHash
|
||||
}
|
||||
|
||||
assertAppHashEqualsOneFromState(appHash, state)
|
||||
return appHash, nil
|
||||
}
|
||||
|
||||
// ApplyBlock on the proxyApp with the last block.
|
||||
func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) {
|
||||
block := h.store.LoadBlock(height)
|
||||
meta := h.store.LoadBlockMeta(height)
|
||||
|
||||
// Use stubs for both mempool and evidence pool since no transactions nor
|
||||
// evidence are needed here - block already exists.
|
||||
blockExec := sm.NewBlockExecutor(h.stateStore, h.logger, proxyApp, emptyMempool{}, sm.EmptyEvidencePool{})
|
||||
blockExec.SetEventBus(h.eventBus)
|
||||
|
||||
var err error
|
||||
state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block)
|
||||
if err != nil {
|
||||
return sm.State{}, err
|
||||
}
|
||||
|
||||
h.nBlocks++
|
||||
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) {
|
||||
if !bytes.Equal(appHash, block.AppHash) {
|
||||
panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X.
|
||||
|
||||
Block: %v
|
||||
`,
|
||||
appHash, block.AppHash, block))
|
||||
}
|
||||
}
|
||||
|
||||
func assertAppHashEqualsOneFromState(appHash []byte, state sm.State) {
|
||||
if !bytes.Equal(appHash, state.AppHash) {
|
||||
panic(fmt.Sprintf(`state.AppHash does not match AppHash after replay. Got
|
||||
%X, expected %X.
|
||||
|
||||
State: %v
|
||||
|
||||
Did you reset Tendermint without resetting your application's data?`,
|
||||
appHash, state.AppHash, state))
|
||||
}
|
||||
}
|
||||
crypto/batch/batch.go (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
package batch
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
"github.com/tendermint/tendermint/crypto/sr25519"
|
||||
)
|
||||
|
||||
// CreateBatchVerifier checks if a key type implements the batch verifier interface.
|
||||
// Currently only ed25519 & sr25519 support batch verification.
|
||||
func CreateBatchVerifier(pk crypto.PubKey) (crypto.BatchVerifier, bool) {
|
||||
|
||||
switch pk.Type() {
|
||||
case ed25519.KeyType:
|
||||
return ed25519.NewBatchVerifier(), true
|
||||
case sr25519.KeyType:
|
||||
return sr25519.NewBatchVerifier(), true
|
||||
}
|
||||
|
||||
// case where the key does not support batch verification
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// SupportsBatchVerifier checks if a key type implements the batch verifier
|
||||
// interface.
|
||||
func SupportsBatchVerifier(pk crypto.PubKey) bool {
|
||||
switch pk.Type() {
|
||||
case ed25519.KeyType, sr25519.KeyType:
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
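
// Illustrative sketch (not part of the original diff): how a caller might
// batch-verify a set of signatures with this package, falling back to
// one-by-one verification for key types without batch support. The
// equal-length pubs/msgs/sigs slices and the helper name verifyAllSketch
// are assumptions for the example.
func verifyAllSketch(pubs []crypto.PubKey, msgs, sigs [][]byte) bool {
	if len(pubs) == 0 {
		return true
	}
	if !SupportsBatchVerifier(pubs[0]) {
		// fall back to single verification
		for i, pk := range pubs {
			if !pk.VerifySignature(msgs[i], sigs[i]) {
				return false
			}
		}
		return true
	}
	bv, ok := CreateBatchVerifier(pubs[0])
	if !ok {
		return false
	}
	for i, pk := range pubs {
		if err := bv.Add(pk, msgs[i], sigs[i]); err != nil {
			return false
		}
	}
	ok, _ = bv.Verify()
	return ok
}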
|
||||
@@ -40,3 +40,15 @@ type Symmetric interface {
|
||||
Encrypt(plaintext []byte, secret []byte) (ciphertext []byte)
|
||||
Decrypt(ciphertext []byte, secret []byte) (plaintext []byte, err error)
|
||||
}
|
||||
|
||||
// If a new key type implements batch verification,
|
||||
// the key type must be registered in github.com/tendermint/tendermint/crypto/batch
|
||||
type BatchVerifier interface {
|
||||
// Add appends an entry into the BatchVerifier.
|
||||
Add(key PubKey, message, signature []byte) error
|
||||
// Verify verifies all the entries in the BatchVerifier, and returns
|
||||
// if every signature in the batch is valid, and a vector of bools
|
||||
// indicating the verification status of each signature (in the order
|
||||
// that signatures were added to the batch).
|
||||
Verify() (bool, []bool)
|
||||
}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package ed25519
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/internal/benchmarking"
|
||||
)
|
||||
@@ -24,3 +26,42 @@ func BenchmarkVerification(b *testing.B) {
|
||||
priv := GenPrivKey()
|
||||
benchmarking.BenchmarkVerification(b, priv)
|
||||
}
|
||||
|
||||
func BenchmarkVerifyBatch(b *testing.B) {
|
||||
msg := []byte("BatchVerifyTest")
|
||||
|
||||
for _, sigsCount := range []int{1, 8, 64, 1024} {
|
||||
sigsCount := sigsCount
|
||||
b.Run(fmt.Sprintf("sig-count-%d", sigsCount), func(b *testing.B) {
|
||||
// Pre-generate all of the keys, and signatures, but do not
|
||||
// benchmark key-generation and signing.
|
||||
pubs := make([]crypto.PubKey, 0, sigsCount)
|
||||
sigs := make([][]byte, 0, sigsCount)
|
||||
for i := 0; i < sigsCount; i++ {
|
||||
priv := GenPrivKey()
|
||||
sig, _ := priv.Sign(msg)
|
||||
pubs = append(pubs, priv.PubKey().(PubKey))
|
||||
sigs = append(sigs, sig)
|
||||
}
|
||||
b.ResetTimer()
|
||||
|
||||
b.ReportAllocs()
|
||||
// NOTE: dividing b.N by sigsCount so that metrics are per-signature
|
||||
for i := 0; i < b.N/sigsCount; i++ {
|
||||
// The benchmark could just benchmark the Verify()
|
||||
// routine, but there is non-trivial overhead associated
|
||||
// with BatchVerifier.Add(), which should be included
|
||||
// in the benchmark.
|
||||
v := NewBatchVerifier()
|
||||
for i := 0; i < sigsCount; i++ {
|
||||
err := v.Add(pubs[i], msg, sigs[i])
|
||||
require.NoError(b, err)
|
||||
}
|
||||
|
||||
if ok, _ := v.Verify(); !ok {
|
||||
b.Fatal("signature set failed batch verification")
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,12 +2,13 @@ package ed25519
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/ed25519"
|
||||
"crypto/subtle"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/hdevalence/ed25519consensus"
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/ed25519"
|
||||
"github.com/oasisprotocol/curve25519-voi/primitives/ed25519/extra/cache"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/crypto/tmhash"
|
||||
@@ -16,7 +17,18 @@ import (

//-------------------------------------

var _ crypto.PrivKey = PrivKey{}
var (
	_ crypto.PrivKey = PrivKey{}

	// curve25519-voi's Ed25519 implementation supports configurable
	// verification behavior, and tendermint uses the ZIP-215 verification
	// semantics.
	verifyOptions = &ed25519.Options{
		Verify: ed25519.VerifyOptionsZIP_215,
	}

	cachingVerifier = cache.NewVerifier(cache.NewLRUCache(cacheSize))
)

const (
	PrivKeyName = "tendermint/PrivKeyEd25519"
@@ -33,6 +45,14 @@ const (
	SeedSize = 32

	KeyType = "ed25519"

	// cacheSize is the number of public keys that will be cached in
	// an expanded format for repeated signature verification.
	//
	// TODO/perf: Either this should exclude single verification, or be
	// tuned to `> validatorSize + maxTxnsPerBlock` to avoid cache
	// thrashing.
	cacheSize = 4096
)

func init() {
@@ -106,14 +126,12 @@ func GenPrivKey() PrivKey {

// genPrivKey generates a new ed25519 private key using the provided reader.
func genPrivKey(rand io.Reader) PrivKey {
	seed := make([]byte, SeedSize)

	_, err := io.ReadFull(rand, seed)
	_, priv, err := ed25519.GenerateKey(rand)
	if err != nil {
		panic(err)
	}

	return PrivKey(ed25519.NewKeyFromSeed(seed))
	return PrivKey(priv)
}

// GenPrivKeyFromSecret hashes the secret with SHA2, and uses
@@ -152,7 +170,7 @@ func (pubKey PubKey) VerifySignature(msg []byte, sig []byte) bool {
		return false
	}

	return ed25519consensus.Verify(ed25519.PublicKey(pubKey), msg, sig)
	return cachingVerifier.VerifyWithOptions(ed25519.PublicKey(pubKey), msg, sig, verifyOptions)
}

func (pubKey PubKey) String() string {
@@ -170,3 +188,40 @@ func (pubKey PubKey) Equals(other crypto.PubKey) bool {

	return false
}

var _ crypto.BatchVerifier = &BatchVerifier{}

// BatchVerifier implements batch verification for ed25519.
type BatchVerifier struct {
	*ed25519.BatchVerifier
}

func NewBatchVerifier() crypto.BatchVerifier {
	return &BatchVerifier{ed25519.NewBatchVerifier()}
}

func (b *BatchVerifier) Add(key crypto.PubKey, msg, signature []byte) error {
	pkEd, ok := key.(PubKey)
	if !ok {
		return fmt.Errorf("pubkey is not Ed25519")
	}

	pkBytes := pkEd.Bytes()

	if l := len(pkBytes); l != PubKeySize {
		return fmt.Errorf("pubkey size is incorrect; expected: %d, got %d", PubKeySize, l)
	}

	// check that the signature is the correct length
	if len(signature) != SignatureSize {
		return errors.New("invalid signature")
	}

	cachingVerifier.AddWithOptions(b.BatchVerifier, ed25519.PublicKey(pkBytes), msg, signature, verifyOptions)

	return nil
}

func (b *BatchVerifier) Verify() (bool, []bool) {
	return b.BatchVerifier.Verify(crypto.CReader())
}
@@ -28,3 +28,28 @@ func TestSignAndValidateEd25519(t *testing.T) {

	assert.False(t, pubKey.VerifySignature(msg, sig))
}

func TestBatchSafe(t *testing.T) {
	v := ed25519.NewBatchVerifier()

	for i := 0; i <= 38; i++ {
		priv := ed25519.GenPrivKey()
		pub := priv.PubKey()

		var msg []byte
		if i%2 == 0 {
			msg = []byte("easter")
		} else {
			msg = []byte("egg")
		}

		sig, err := priv.Sign(msg)
		require.NoError(t, err)

		err = v.Add(pub, msg, sig)
		require.NoError(t, err)
	}

	ok, _ := v.Verify()
	require.True(t, ok)
}
Some files were not shown because too many files have changed in this diff.