Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-13 16:22:53 +00:00)
Compare commits: feature/ab...wb/debug-d (753 commits)
(Commit list: 753 commits, identified only by abbreviated SHA, from 1f2fb500ae through ecd3cbc2bd; the author, date, and message columns are empty in this view.)
.github/CODEOWNERS (3 changes, vendored)
@@ -7,6 +7,5 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield

/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering @adizere @lasarojc
.github/ISSUE_TEMPLATE/bug-report.md (11 changes, vendored)
@@ -1,18 +1,13 @@
---
name: Bug report
name: Bug Report
about: Create a report to help us squash bugs!

---
<!--

Please fill in as much of the template below as you can.

If you have general questions, please create a new discussion:
https://github.com/tendermint/tendermint/discussions

Be ready for followup questions, and please respond in a timely manner. We might
ask you to provide additional logs and data (tendermint & app).

Be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app).
-->

**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
.github/ISSUE_TEMPLATE/config.yml (5 changes, vendored)
@@ -1,5 +0,0 @@
blank_issues_enabled: true
contact_links:
- name: Ask a question
url: https://github.com/tendermint/tendermint/discussions
about: Please ask and answer questions here
.github/ISSUE_TEMPLATE/feature-request.md (11 changes, vendored)
@@ -1,5 +1,5 @@
---
name: Feature request
name: Feature Request
about: Create a proposal to request a feature

---
@@ -25,3 +25,12 @@ Are there any disadvantages of including this feature? -->
## Proposal

<!-- Detailed description of requirements of implementation -->

____

#### For Admin Use

- [ ] Not duplicate issue
- [ ] Appropriate labels applied
- [ ] Appropriate contributors tagged
- [ ] Contributor assigned/self-assigned
.github/ISSUE_TEMPLATE/proposal.md (28 changes, vendored)
@@ -1,28 +0,0 @@
---
name: Protocol change proposal
about: Create a proposal to request a change to the protocol

---

<!-- < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < ☺
v ✰ Thanks for opening an issue! ✰
v Before smashing the submit button please review the template.
v Word of caution: Under-specified proposals may be rejected summarily
☺ > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > -->

# Protocol Change Proposal

## Summary

<!-- Short, concise description of the proposed change -->

## Problem Definition

<!-- Why do we need this change?
What problems may be addressed by introducing this change?
What benefits does Tendermint stand to gain by including this change?
Are there any disadvantages of including this change? -->

## Proposal

<!-- Detailed description of requirements of implementation -->
.github/PULL_REQUEST_TEMPLATE.md (33 changes, vendored)
@@ -1,32 +1,7 @@
<!--
Please add a description of the changes that this PR introduces and the files that
are the most critical to review.

Please add a reference to the issue that this PR addresses and indicate which
files are most critical to review. If it fully addresses a particular issue,
please include "Closes #XXX" (where "XXX" is the issue number).
If this PR fixes an open Issue, please include "Closes #XXX" (where "XXX" is the Issue number)
so that GitHub will automatically close the Issue when this PR is merged.

If this PR is non-trivial/large/complex, please ensure that you have either
created an issue that the team's had a chance to respond to, or had some
discussion with the team prior to submitting substantial pull requests. The team
can be reached via GitHub Discussions or the Cosmos Network Discord server in
the #tendermint-core channel. GitHub Discussions is preferred over Discord as it
allows us to keep track of conversations topically.
https://github.com/tendermint/tendermint/discussions

If the work in this PR is not aligned with the team's current priorities, please
be advised that it may take some time before it is merged - especially if it has
not yet been discussed with the team.

See the project board for the team's current priorities:
https://github.com/orgs/tendermint/projects/15/views/5

-->

---

#### PR checklist

- [ ] Tests written/updated, or no tests needed
- [ ] `CHANGELOG_PENDING.md` updated, or no changelog entry needed
- [ ] Updated relevant documentation (`docs/`) and code comments, or no
documentation updates needed
.github/auto-comment.yml (16 changes, vendored, Normal file)
@@ -0,0 +1,16 @@
pullRequestOpened: |
:wave: Thanks for creating a PR!

Before we can merge this PR, please make sure that all the following items have been
checked off. If any of the checklist items are not applicable, please leave them but
write a little note why.

- [ ] Wrote tests
- [ ] Updated CHANGELOG_PENDING.md
- [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
- [ ] Updated relevant documentation (`docs/`) and code comments
- [ ] Re-reviewed `Files changed` in the Github PR explorer
- [ ] Applied Appropriate Labels

Thank you for your contribution to Tendermint! :rocket:
.github/codecov.yml (14 changes, vendored)
@@ -5,14 +5,19 @@ coverage:
status:
project:
default:
threshold: 20%
patch: off
threshold: 1%
patch: on
changes: off

github_checks:
annotations: false

comment: false
comment:
layout: "diff, files"
behavior: default
require_changes: no
require_base: no
require_head: yes

ignore:
- "docs"
@@ -20,6 +25,3 @@ ignore:
- "scripts"
- "**/*.pb.go"
- "libs/pubsub/query/query.peg.go"
- "*.md"
- "*.rst"
- "*.yml"
.github/dependabot.yml (70 changes, vendored)
@@ -3,73 +3,25 @@ updates:
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "main"
|
||||
interval: daily
|
||||
time: "11:00"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "v0.37.x"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: github-actions
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "v0.34.x"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: npm
|
||||
directory: "/docs"
|
||||
schedule:
|
||||
interval: weekly
|
||||
interval: daily
|
||||
time: "11:00"
|
||||
open-pull-requests-limit: 10
|
||||
|
||||
###################################
|
||||
##
|
||||
## Update All Go Dependencies
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: weekly
|
||||
target-branch: "main"
|
||||
open-pull-requests-limit: 10
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
reviewers:
|
||||
- fadeev
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: daily
|
||||
target-branch: "v0.37.x"
|
||||
# Only allow automated security-related dependency updates on release
|
||||
# branches.
|
||||
open-pull-requests-limit: 0
|
||||
time: "11:00"
|
||||
open-pull-requests-limit: 10
|
||||
reviewers:
|
||||
- melekes
|
||||
- tessr
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
- package-ecosystem: gomod
|
||||
directory: "/"
|
||||
schedule:
|
||||
interval: daily
|
||||
target-branch: "v0.34.x"
|
||||
# Only allow automated security-related dependency updates on release
|
||||
# branches.
|
||||
open-pull-requests-limit: 0
|
||||
labels:
|
||||
- T:dependencies
|
||||
- S:automerge
|
||||
|
||||
.github/issue_template.md (6 changes, vendored)
@@ -1,6 +0,0 @@
<!--

If you want to ask a general question, please create a new discussion instead of
an issue: https://github.com/tendermint/tendermint/discussions

-->
.github/linter/markdownlint.yml (8 changes, vendored, Normal file)
@@ -0,0 +1,8 @@
default: true,
MD007: { "indent": 4 }
MD013: false
MD024: { siblings_only: true }
MD025: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false
.github/linters/markdownlint.yml (15 changes, vendored)
@@ -1,15 +0,0 @@
# markdownlint configuration for Super-Linter
# - https://github.com/DavidAnson/markdownlint
# - https://github.com/github/super-linter

# Default state for all rules
default: true

# See https://github.com/DavidAnson/markdownlint#rules--aliases for rules
MD007: {"indent": 4}
MD013: false
MD024: {siblings_only: true}
MD025: false
MD033: {no-inline-html: false}
no-hard-tabs: false
whitespace: false
.github/linters/yaml-lint.yml (9 changes, vendored)
@@ -1,9 +0,0 @@
---
# Default rules for YAML linting from super-linter.
# See: See https://yamllint.readthedocs.io/en/stable/rules.html
extends: default
rules:
document-end: disable
document-start: disable
line-length: disable
truthy: disable
.github/mergify.yml (29 changes, vendored)
@@ -1,33 +1,16 @@
queue_rules:
- name: default
conditions:
- base=main
- label=S:automerge

pull_request_rules:
- name: Automerge to main
- name: Automerge to master
conditions:
- base=main
- base=master
- label=S:automerge
actions:
queue:
merge:
method: squash
name: default
commit_message_template: |
{{ title }} (#{{ number }})

{{ body }}
- name: backport patches to v0.37.x branch
conditions:
- base=main
- label=S:backport-to-v0.37.x
actions:
backport:
branches:
- v0.37.x
strict: true
commit_message: title+body
- name: backport patches to v0.34.x branch
conditions:
- base=main
- base=master
- label=S:backport-to-v0.34.x
actions:
backport:
.github/workflows/build.yml (82 changes, vendored)
@@ -1,82 +0,0 @@
|
||||
name: Build
|
||||
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
|
||||
# This workflow runs on every push to main or release branch and every pull requests
|
||||
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
goarch: ["arm", "amd64"]
|
||||
goos: ["linux"]
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
"!test/"
|
||||
go.mod
|
||||
go.sum
|
||||
Makefile
|
||||
- name: install
|
||||
run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
test_abci_cli:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- run: abci/tests/test_cli/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
test_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/*.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- name: test_apps
|
||||
run: test/app/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
.github/workflows/check-generated.yml (73 changes, vendored)
@@ -1,73 +0,0 @@
|
||||
# Verify that generated code is up-to-date.
|
||||
#
|
||||
# Note that we run these checks regardless whether the input files have
|
||||
# changed, because generated code can change in response to toolchain updates
|
||||
# even if no files in the repository are modified.
|
||||
name: Check generated code
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
check-mocks:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: "Check generated mocks"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
make mockery 2>/dev/null
|
||||
|
||||
if ! git diff --stat --exit-code ; then
|
||||
echo ">> ERROR:"
|
||||
echo ">>"
|
||||
echo ">> Generated mocks require update (either Mockery or source files may have changed)."
|
||||
echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR."
|
||||
echo ">>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
check-proto:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.18"
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 1 # we need a .git directory to run git diff
|
||||
|
||||
- name: "Check protobuf generated code"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
# Install buf and gogo tools, so that differences that arise from
|
||||
# toolchain differences are also caught.
|
||||
readonly tools="$(mktemp -d)"
|
||||
export PATH="${PATH}:${tools}/bin"
|
||||
export GOBIN="${tools}/bin"
|
||||
|
||||
go install github.com/bufbuild/buf/cmd/buf
|
||||
go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest
|
||||
|
||||
make proto-gen
|
||||
|
||||
if ! git diff --stat --exit-code ; then
|
||||
echo ">> ERROR:"
|
||||
echo ">>"
|
||||
echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)."
|
||||
echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR."
|
||||
echo ">>"
|
||||
exit 1
|
||||
fi
|
||||
.github/workflows/coverage.yml (127 changes, vendored, Normal file)
@@ -0,0 +1,127 @@
|
||||
name: Test Coverage
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
split-test-files:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- name: Create a file with all the pkgs
|
||||
run: go list ./... > pkgs.txt
|
||||
- name: Split pkgs into 4 files
|
||||
run: split -d -n l/4 pkgs.txt pkgs.txt.part.
|
||||
# cache multiple
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-00"
|
||||
path: ./pkgs.txt.part.00
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-01"
|
||||
path: ./pkgs.txt.part.01
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-02"
|
||||
path: ./pkgs.txt.part.02
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-03"
|
||||
path: ./pkgs.txt.part.03
|
||||
|
||||
build-linux:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
goarch: ["arm", "amd64"]
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.16"
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
needs: split-test-files
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
part: ["00", "01", "02", "03"]
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.16"
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-${{ matrix.part }}"
|
||||
if: env.GIT_DIFF
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.16
|
||||
- name: test & coverage report creation
|
||||
run: |
|
||||
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
|
||||
path: ./${{ matrix.part }}profile.out
|
||||
|
||||
upload-coverage-report:
|
||||
runs-on: ubuntu-latest
|
||||
needs: tests
|
||||
steps:
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-00-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-01-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-02-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-03-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- run: |
|
||||
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
|
||||
if: env.GIT_DIFF
|
||||
- uses: codecov/codecov-action@v2.0.2
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
if: env.GIT_DIFF
|
||||
.github/workflows/docker.yml (25 changes, vendored)
@@ -1,21 +1,20 @@
|
||||
name: Docker
|
||||
# Build & Push rebuilds the Tendermint docker image on every push to main and creation of tags
|
||||
# and pushes the image to https://hub.docker.com/r/tendermint/tendermint
|
||||
name: Build & Push
|
||||
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
|
||||
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- name: Prepare
|
||||
id: prep
|
||||
run: |
|
||||
@@ -40,18 +39,18 @@ jobs:
|
||||
with:
|
||||
platforms: all
|
||||
|
||||
- name: Set up Docker Build
|
||||
uses: docker/setup-buildx-action@v2.2.1
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1.5.0
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@v2.1.0
|
||||
uses: docker/login-action@v1.10.0
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Publish to Docker Hub
|
||||
uses: docker/build-push-action@v3.2.0
|
||||
uses: docker/build-push-action@v2.6.1
|
||||
with:
|
||||
context: .
|
||||
file: ./DOCKER/Dockerfile
|
||||
|
||||
.github/workflows/docs-deployment.yml (65 changes, vendored)
@@ -1,65 +0,0 @@
|
||||
# Build and deploy the docs.tendermint.com website content.
|
||||
# The static content is published to GitHub Pages.
|
||||
#
|
||||
# For documentation build info, see docs/DOCS_README.md.
|
||||
name: Build static documentation site
|
||||
on:
|
||||
workflow_dispatch: # allow manual updates
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- v0.34.x
|
||||
paths:
|
||||
- docs/**
|
||||
- spec/**
|
||||
|
||||
jobs:
|
||||
# This is split into two jobs so that the build, which runs npm, does not
|
||||
# have write access to anything. The deploy requires write access to publish
|
||||
# to the branch used by GitHub Pages, however, so we can't just make the
|
||||
# whole workflow read-only.
|
||||
build:
|
||||
name: VuePress build
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: alpine:latest
|
||||
permissions:
|
||||
contents: read
|
||||
steps:
|
||||
- name: Install generator dependencies
|
||||
run: |
|
||||
apk add --no-cache make bash git npm
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
# We need to fetch full history so the backport branches for previous
|
||||
# versions will be available for the build.
|
||||
fetch-depth: 0
|
||||
- name: Build documentation
|
||||
env:
|
||||
NODE_OPTIONS: "--openssl-legacy-provider"
|
||||
run: |
|
||||
git config --global --add safe.directory "$PWD"
|
||||
make build-docs
|
||||
- uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: build-output
|
||||
path: /tmp/tendermint-core-docs
|
||||
|
||||
deploy:
|
||||
name: Deploy to GitHub Pages
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/download-artifact@v3
|
||||
with:
|
||||
name: build-output
|
||||
path: ~/output
|
||||
- name: Deploy to GitHub Pages
|
||||
uses: JamesIves/github-pages-deploy-action@v4
|
||||
with:
|
||||
branch: 'docs-tendermint-com'
|
||||
folder: ~/output
|
||||
single-commit: true
|
||||
.github/workflows/docs-toc.yml (20 changes, vendored)
@@ -1,20 +0,0 @@
# Verify that important design docs have ToC entries.
name: Check documentation ToC
on:
pull_request:
push:
branches:
- main

jobs:
check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
with:
PATTERNS: |
docs/architecture/**
docs/rfc/**
- run: make check-docs-toc
if: env.GIT_DIFF
.github/workflows/e2e-manual.yml (36 changes, vendored)
@@ -1,36 +0,0 @@
|
||||
# Runs randomly generated E2E testnets nightly on main
|
||||
# manually run e2e tests
|
||||
name: e2e-manual
|
||||
on:
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03']
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner tests
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 5 -d networks/nightly/
|
||||
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
.github/workflows/e2e-nightly-34x.yml (71 changes, vendored)
@@ -1,10 +1,12 @@
|
||||
# Runs randomly generated E2E testnets nightly on the 0.34.x branch.
|
||||
# Runs randomly generated E2E testnets nightly
|
||||
# on the 0.34.x release branch
|
||||
|
||||
# !! This file should be kept in sync with the e2e-nightly-main.yml file,
|
||||
# modulo changes to the version labels.
|
||||
# !! If you change something in this file, you probably want
|
||||
# to update the e2e-nightly-master workflow as well!
|
||||
|
||||
name: e2e-nightly-34x
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually, in theory
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
@@ -15,24 +17,18 @@ jobs:
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01']
|
||||
group: ['00', '01', '02', '03']
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.16'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v2.3.4
|
||||
with:
|
||||
ref: 'v0.34.x'
|
||||
|
||||
- name: Capture git repo info
|
||||
id: git-info
|
||||
run: |
|
||||
echo "::set-output name=branch::`git branch --show-current`"
|
||||
echo "::set-output name=commit::`git rev-parse HEAD`"
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
@@ -41,39 +37,40 @@ jobs:
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 2 -d networks/nightly
|
||||
run: ./build/generator -g 4 -d networks/nightly
|
||||
|
||||
- name: Run testnets in group ${{ matrix.group }}
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
outputs:
|
||||
git-branch: ${{ steps.git-info.outputs.branch }}
|
||||
git-commit: ${{ steps.git-info.outputs.commit }}
|
||||
|
||||
e2e-nightly-fail:
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':skull:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
e2e-nightly-success: # may turn this off once they seem to pass consistently
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ success() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on success
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':white_check_mark:'
|
||||
SLACK_COLOR: good
|
||||
SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
.github/workflows/e2e-nightly-37x.yml (79 changes, vendored)
@@ -1,79 +0,0 @@
|
||||
# Runs randomly generated E2E testnets nightly on the v0.37.x branch.
|
||||
|
||||
# !! This file should be kept in sync with the e2e-nightly-main.yml file,
|
||||
# modulo changes to the version labels.
|
||||
|
||||
name: e2e-nightly-37x
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03', "04"]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
with:
|
||||
ref: 'v0.37.x'
|
||||
|
||||
- name: Capture git repo info
|
||||
id: git-info
|
||||
run: |
|
||||
echo "::set-output name=branch::`git branch --show-current`"
|
||||
echo "::set-output name=commit::`git rev-parse HEAD`"
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner tests
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 5 -d networks/nightly/
|
||||
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
outputs:
|
||||
git-branch: ${{ steps.git-info.outputs.branch }}
|
||||
git-commit: ${{ steps.git-info.outputs.commit }}
|
||||
|
||||
e2e-nightly-fail:
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
.github/workflows/e2e-nightly-main.yml (68 changes, vendored)
@@ -1,68 +0,0 @@
|
||||
# Runs randomly generated E2E testnets nightly on main
|
||||
|
||||
# !! Relevant changes to this file should be propagated to the e2e-nightly-<V>x
|
||||
# files for the supported backport branches, when appropriate, modulo version
|
||||
# markers.
|
||||
|
||||
name: e2e-nightly-main
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03', "04"]
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.18'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner tests
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 5 -d networks/nightly/
|
||||
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
e2e-nightly-fail:
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
BRANCH: ${{ github.ref_name }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> related to the failure."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
.github/workflows/e2e-nightly-master.yml (74 changes, vendored, Normal file)
@@ -0,0 +1,74 @@
|
||||
# Runs randomly generated E2E testnets nightly on master
|
||||
|
||||
# !! If you change something in this file, you probably want
|
||||
# to update the e2e-nightly-34x workflow as well!
|
||||
|
||||
name: e2e-nightly-master
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test-2:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
p2p: ['legacy', 'new', 'hybrid']
|
||||
group: ['00', '01']
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.16'
|
||||
|
||||
- uses: actions/checkout@v2.3.4
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
|
||||
|
||||
- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
e2e-nightly-fail-2:
|
||||
needs: e2e-nightly-test-2
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':skull:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Nightly E2E tests failed on master
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
e2e-nightly-success: # may turn this off once they seem to pass consistently
|
||||
needs: e2e-nightly-test-2
|
||||
if: ${{ success() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on success
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':white_check_mark:'
|
||||
SLACK_COLOR: good
|
||||
SLACK_MESSAGE: Nightly E2E tests passed on master
|
||||
SLACK_FOOTER: ''
|
||||
.github/workflows/e2e.yml (23 changes, vendored)
@@ -1,12 +1,12 @@
|
||||
name: e2e
|
||||
# Runs the CI end-to-end test network on all pushes to main or release branches
|
||||
# Runs the CI end-to-end test network on all pushes to master or release branches
|
||||
# and every pull request, but only if any Go files have been changed.
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
- master
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
@@ -14,11 +14,11 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.18'
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
go-version: '1.16'
|
||||
- uses: actions/checkout@v2.3.4
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
@@ -28,10 +28,15 @@ jobs:
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run two make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker runner tests
|
||||
run: make -j2 docker runner
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
- name: Run CI testnet
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/ci.toml
|
||||
run: ./build/runner -f networks/ci.toml
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
- name: Emit logs on failure
|
||||
if: ${{ failure() }}
|
||||
working-directory: test/e2e
|
||||
run: ./build/runner -f networks/ci.toml logs
|
||||
|
||||
.github/workflows/fuzz-nightly.yml (56 changes, vendored)
@@ -1,11 +1,11 @@
|
||||
# Runs fuzzing nightly.
|
||||
name: Fuzz Tests
|
||||
name: fuzz-nightly
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
schedule:
|
||||
- cron: '0 3 * * *'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [master]
|
||||
paths:
|
||||
- "test/fuzz/**/*.go"
|
||||
|
||||
@@ -13,19 +13,24 @@ jobs:
|
||||
fuzz-nightly-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/setup-go@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.18'
|
||||
go-version: '1.16'
|
||||
|
||||
- uses: actions/checkout@v3
|
||||
- uses: actions/checkout@v2.3.4
|
||||
|
||||
- name: Install go-fuzz
|
||||
working-directory: test/fuzz
|
||||
run: go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest
|
||||
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
- name: Fuzz mempool
|
||||
- name: Fuzz mempool-v1
|
||||
working-directory: test/fuzz
|
||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
|
||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
|
||||
continue-on-error: true
|
||||
|
||||
- name: Fuzz mempool-v0
|
||||
working-directory: test/fuzz
|
||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
|
||||
continue-on-error: true
|
||||
|
||||
- name: Fuzz p2p-addrbook
|
||||
@@ -49,14 +54,14 @@ jobs:
|
||||
continue-on-error: true
|
||||
|
||||
- name: Archive crashers
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: crashers
|
||||
path: test/fuzz/**/crashers
|
||||
retention-days: 3
|
||||
|
||||
- name: Archive suppressions
|
||||
uses: actions/upload-artifact@v3
|
||||
uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: suppressions
|
||||
path: test/fuzz/**/suppressions
|
||||
@@ -75,24 +80,13 @@ jobs:
|
||||
if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: slackapi/slack-github-action@v1.23.0
|
||||
- name: Notify Slack if any crashers
|
||||
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
|
||||
env:
|
||||
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
|
||||
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
|
||||
BRANCH: ${{ github.ref_name }}
|
||||
CRASHERS: ${{ needs.fuzz-nightly-test.outputs.crashers-count }}
|
||||
RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
|
||||
with:
|
||||
payload: |
|
||||
{
|
||||
"blocks": [
|
||||
{
|
||||
"type": "section",
|
||||
"text": {
|
||||
"type": "mrkdwn",
|
||||
"text": ":skull: Nightly fuzz tests for `${{ env.BRANCH }}` failed with ${{ env.CRASHERS }} crasher(s). See the <${{ env.RUN_URL }}|run details>."
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly Fuzz Tests
|
||||
SLACK_ICON_EMOJI: ':firecracker:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Crashers found in Nightly Fuzz tests
|
||||
SLACK_FOOTER: ''
|
||||
|
||||
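An approximate local equivalent of the nightly fuzz job, assuming a Go toolchain is installed and using only the install command and Make targets that appear in the diff above:

```bash
# Run from the repo root; each fuzz target is bounded the same way the workflow does.
cd test/fuzz
go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest
timeout -s SIGINT --preserve-status 10m make fuzz-mempool
timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
```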
.github/workflows/janitor.yml (2 changed lines, vendored)
@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.11.0
- uses: styfle/cancel-workflow-action@0.9.0
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}
.github/workflows/jepsen.yml (65 changed lines, new file, vendored)
@@ -0,0 +1,65 @@
# Runs a Jepsen test - cas-register (no nemesis) by default.
# See inputs for various options.
# Repo: https://github.com/tendermint/jepsen
#
# If you want to test a new breaking version of Tendermint, you'll need to
# update the Merkleeyes ABCI app and 'merkleeyesUrl' input accordingly. You can
# upload a new tarball to
# https://github.com/tendermint/jepsen/releases/tag/0.2.1.
#
# Manually triggered.
name: jepsen
on:
workflow_dispatch:
inputs:
workload:
description: 'Test workload to run: (cas-register | set)'
required: true
default: 'cas-register'
nemesis:
description: 'Nemesis to use: (none | clocks | single-partitions | half-partitions | ring-partitions | split-dup-validators | peekaboo-dup-validators | changing-validators | crash | truncate-tendermint | truncate-merkleeyes)'
required: true
default: 'none'
dupOrSuperByzValidators:
description: '"--dup-validators" (multiple validators share the same key) and(or) "--super-byzantine-validators" (byzantine validators have just shy of 2/3 the voting weight)'
required: false
default: ''
concurrency:
description: 'How many workers should we run? Must be an integer and >= 10, optionally followed by n (e.g. 3n) to multiply by the number of nodes.'
required: true
default: 10
timeLimit:
description: 'Excluding setup and teardown, how long should a test run for, in seconds?'
required: true
default: 60
tendermintUrl:
description: 'Where to grab the Tendermint tarball (w/ linux/amd64 binary)'
required: true
default: 'https://github.com/melekes/katas/releases/download/0.2.0/tendermint.tar.gz'
merkleeyesUrl:
description: 'Where to grab the Merkleeyes tarball (w/ linux/amd64 binary)'
required: true
default: 'https://github.com/tendermint/jepsen/releases/download/0.2.1/merkleeyes_0.1.7.tar.gz'

jobs:
jepsen-test:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.3.4
with:
repository: 'tendermint/jepsen'

- name: Start a Jepsen cluster in background
working-directory: docker
run: ./bin/up --daemon

- name: Run the test
run: docker exec -i jepsen-control bash -c 'source /root/.bashrc; cd /jepsen/tendermint; lein run test --nemesis ${{ github.event.inputs.nemesis }} --workload ${{ github.event.inputs.workload }} --concurrency ${{ github.event.inputs.concurrency }} --tendermint-url ${{ github.event.inputs.tendermintUrl }} --merkleeyes-url ${{ github.event.inputs.merkleeyesUrl }} --time-limit ${{ github.event.inputs.timeLimit }} ${{ github.event.inputs.dupOrSuperByzValidators }}'

- name: Archive results
uses: actions/upload-artifact@v2
with:
name: results
path: tendermint/store/latest
retention-days: 3
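Since this workflow is manually triggered, one way to dispatch it is via the GitHub CLI; a sketch assuming `gh` is installed and authenticated against the repository hosting the workflow (inputs not passed fall back to the defaults listed above):

```bash
# Dispatch the Jepsen workflow with the cas-register workload and no nemesis.
gh workflow run jepsen.yml \
  -f workload=cas-register \
  -f nemesis=none \
  -f concurrency=10 \
  -f timeLimit=60
```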
.github/workflows/linkchecker.yml (12 changed lines, new file, vendored)
@@ -0,0 +1,12 @@
name: Check Markdown links
on:
schedule:
- cron: '* */24 * * *'
jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
with:
folder-path: "docs"
.github/workflows/lint.yml (28 changed lines, vendored)
@@ -1,37 +1,29 @@
name: Golang Linter
# Lint runs golangci-lint over the entire Tendermint repository.
#
# This workflow is run on every pull request and push to main.
#
# The `golangci` job will pass without running if no *.{go, mod, sum}
# files have been modified.
#
# To run this locally, simply run `make lint` from the root of the repo.

name: Lint
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
on:
pull_request:
push:
branches:
- main
- master
jobs:
golangci:
name: golangci-lint
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v3
with:
go-version: '1.18'
- uses: technote-space/get-diff-action@v6
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: golangci/golangci-lint-action@v3
- uses: golangci/golangci-lint-action@v2.5.2
with:
version: v1.50.1
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
version: v1.38
args: --timeout 10m
github-token: ${{ secrets.github_token }}
if: env.GIT_DIFF
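Per the workflow's own comment, the same lint run can be reproduced locally; a minimal sketch, the second command assuming golangci-lint is installed on the PATH:

```bash
# From the repo root, as the workflow comment suggests:
make lint
# Or invoke the linter directly with the same timeout the workflow passes:
golangci-lint run --timeout 10m
```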
.github/workflows/linter.yml (14 changed lines, vendored)
@@ -1,14 +1,14 @@
name: Markdown Linter
name: Lint
on:
push:
branches:
- main
- master
paths:
- "**.md"
- "**.yml"
- "**.yaml"
pull_request:
branches: [main]
branches: [master]
paths:
- "**.md"
- "**.yml"
@@ -19,14 +19,14 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v3
uses: actions/checkout@v2.3.4
- name: Lint Code Base
uses: docker://github/super-linter:v4
uses: docker://github/super-linter:v3
env:
LINTER_RULES_PATH: .
VALIDATE_ALL_CODEBASE: true
DEFAULT_BRANCH: main
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_MD: true
VALIDATE_OPENAPI: true
VALIDATE_YAML: true
YAML_CONFIG_FILE: yaml-lint.yml
.github/workflows/markdown-links.yml (20 changed lines, vendored)
@@ -1,20 +0,0 @@
name: Check Markdown links

on:
schedule:
# 2am UTC daily
- cron: '0 2 * * *'

jobs:
markdown-link-check:
strategy:
matrix:
branch: ['main', 'v0.37.x', 'v0.34.x']
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
ref: ${{ matrix.branch }}
- uses: informalsystems/github-action-markdown-link-check@main
with:
config-file: '.md-link-check.json'
.github/workflows/pre-release.yml (65 changed lines, vendored)
@@ -1,65 +0,0 @@
name: "Pre-release"

on:
push:
tags:
- "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
- "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
- "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10

jobs:
prerelease:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
with:
fetch-depth: 0

- uses: actions/setup-go@v3
with:
go-version: '1.18'

- name: Build
uses: goreleaser/goreleaser-action@v3
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

# Link to CHANGELOG_PENDING.md as release notes.
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md

- name: Release
uses: goreleaser/goreleaser-action@v3
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

prerelease-success:
needs: prerelease
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack upon pre-release
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
with:
payload: |
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
}
}
]
}
.github/workflows/proto-docker.yml (51 changed lines, new file, vendored)
@@ -0,0 +1,51 @@
name: Build & Push TM Proto Builder
on:
pull_request:
paths:
- "tools/proto/*"
push:
branches:
- master
paths:
- "tools/proto/*"
schedule:
# run this job once a month to recieve any go or buf updates
- cron: "* * 1 * *"

jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- name: Prepare
id: prep
run: |
DOCKER_IMAGE=tendermintdev/docker-build-proto
VERSION=noop
if [[ $GITHUB_REF == refs/tags/* ]]; then
VERSION=${GITHUB_REF#refs/tags/}
elif [[ $GITHUB_REF == refs/heads/* ]]; then
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
VERSION=latest
fi
fi
TAGS="${DOCKER_IMAGE}:${VERSION}"
echo ::set-output name=tags::${TAGS}

- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.5.0

- name: Login to DockerHub
uses: docker/login-action@v1.10.0
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Publish to Docker Hub
uses: docker/build-push-action@v2.6.1
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile
push: ${{ github.event_name != 'pull_request' }}
tags: ${{ steps.prep.outputs.tags }}
.github/workflows/proto-lint.yml (21 changed lines, vendored)
@@ -1,21 +0,0 @@
name: Protobuf Lint
on:
pull_request:
paths:
- 'proto/**'
push:
branches:
- main
paths:
- 'proto/**'

jobs:
lint:
runs-on: ubuntu-latest
timeout-minutes: 5
steps:
- uses: actions/checkout@v3
- uses: bufbuild/buf-setup-action@v1.9.0
- uses: bufbuild/buf-lint-action@v1
with:
input: 'proto'
.github/workflows/proto.yml (23 changed lines, new file, vendored)
@@ -0,0 +1,23 @@
name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
workflow_dispatch: # allow running workflow manually
pull_request:
paths:
- "**.proto"
jobs:
proto-lint:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- name: check-breakage
run: make proto-check-breaking-ci
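Both jobs wrap Make targets, so they can be reproduced locally; a sketch assuming buf and the repository's Makefile targets are available:

```bash
# Same targets the two jobs above invoke, run from the repo root.
make proto-lint
make proto-check-breaking-ci
```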
.github/workflows/release.yml (46 changed lines, vendored)
@@ -2,61 +2,39 @@ name: "Release"

on:
push:
branches:
- "RC[0-9]/**"
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

jobs:
release:
goreleaser:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v3
uses: actions/checkout@v2.3.4
with:
fetch-depth: 0

- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: '1.18'
go-version: '1.16'

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
if: startsWith(github.ref, 'refs/tags/')

- name: Build
uses: goreleaser/goreleaser-action@v3
uses: goreleaser/goreleaser-action@v2
if: ${{ github.event_name == 'pull_request' }}
with:
version: latest
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

- name: Release
uses: goreleaser/goreleaser-action@v3
uses: goreleaser/goreleaser-action@v2
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

release-success:
needs: release
if: ${{ success() }}
runs-on: ubuntu-latest
steps:
- name: Notify Slack upon release
uses: slackapi/slack-github-action@v1.23.0
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
with:
payload: |
{
"blocks": [
{
"type": "section",
"text": {
"type": "mrkdwn",
"text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
}
}
]
}
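The tag-push path of this workflow boils down to two commands; a rough local sketch using only the commands from the diff, assuming goreleaser is installed and the checkout is at the release tag (the tag value below is just an illustrative example):

```bash
TAG=v0.34.24   # example tag; substitute the tag being released
echo "https://github.com/tendermint/tendermint/blob/${TAG}/CHANGELOG.md#${TAG}" > ../release_notes.md
goreleaser release --rm-dist --release-notes=../release_notes.md
```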
.github/workflows/stale.yml (2 changed lines, vendored)
@@ -7,7 +7,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v6
- uses: actions/stale@v4
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
.github/workflows/tests.yml (108 changed lines, vendored)
@@ -1,34 +1,106 @@
name: Test
name: Tests
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or release branch and every pull requests
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
on:
pull_request:
push:
paths:
- "**.go"
branches:
- main
- master
- release/**

jobs:
tests:
build:
name: Build
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
part: ["00", "01", "02", "03", "04", "05"]
timeout-minutes: 5
steps:
- uses: actions/setup-go@v3
- uses: actions/setup-go@v2
with:
go-version: "1.18"
- uses: actions/checkout@v3
- uses: technote-space/get-diff-action@v6
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
Makefile
- name: Run Go Tests
run: |
make test-group-${{ matrix.part }} NUM_SPLIT=6
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF

test_abci_cli:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- run: abci/tests/test_cli/test.sh
shell: bash
if: env.GIT_DIFF

test_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_apps
run: test/app/test.sh
shell: bash
if: env.GIT_DIFF
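The matrix variant of this workflow shards the Go test suite into six groups; a minimal local sketch using only the Make targets shown in the diff:

```bash
# Run one shard of the split test suite (any of test-group-00 .. test-group-05).
make test-group-00 NUM_SPLIT=6
# The older variant of the workflow instead builds and installs the binaries up front:
make install install_abci
```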
.gitignore (11 changed lines, vendored)
@@ -25,7 +25,6 @@ docs/_build
docs/dist
docs/node_modules/
docs/spec
docs/.vuepress/public/rpc
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out
@@ -38,7 +37,6 @@ terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
test/app/grpc_client
test/loadtime/build
test/e2e/build
test/e2e/networks/*/
test/logs
@@ -48,12 +46,3 @@ test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
proto/spec/**/*.pb.go
*.aux
*.bbl
*.blg
*.pdf
*.gz
*.dvi
# Python virtual environments
.venv
@@ -1,36 +1,54 @@
linters:
enable:
- asciicheck
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
- goconst
- gocritic
# - gocyclo
# - godox
- gofmt
- goimports
- revive
- golint
- gosec
- gosimple
- govet
- ineffassign
# - interfacer
- lll
- misspell
# - maligned
- nakedret
- nolintlint
- prealloc
- scopelint
- staticcheck
# - structcheck // to be fixed by golangci-lint
- structcheck
- stylecheck
- typecheck
- unconvert
# - unparam
- unused
- varcheck
# - whitespace
# - wsl
# - gocognit
- nolintlint
- asciicheck

issues:
exclude-rules:
- path: _test\.go
linters:
- gosec
- linters:
- lll
source: "https://"
max-same-issues: 50

linters-settings:
@@ -25,13 +25,12 @@ checksum:
algorithm: sha256

release:
prerelease: auto
name_template: "{{.Version}}"
name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"

archives:
- files:
- LICENSE
- README.md
- UPGRADING.md
- SECURITY.md
- CHANGELOG.md
- LICENSE
- README.md
- UPGRADING.md
- SECURITY.md
- CHANGELOG.md
@@ -1,17 +0,0 @@
# markdownlint configuration
# https://github.com/DavidAnson/markdownlint

# Default state for all rules
default: true

# See https://github.com/DavidAnson/markdownlint#rules--aliases for rules
MD001: false
MD007: {indent: 4}
MD013: false
MD024: {siblings_only: true}
MD025: false
MD033: false
MD036: false
MD010: false
MD012: false
MD028: false
@@ -1,17 +0,0 @@
{
"retryOn429": true,
"retryCount": 5,
"fallbackRetryDelay": "30s",
"aliveStatusCodes": [200, 206, 503],
"httpHeaders": [
{
"urls": [
"https://docs.github.com/",
"https://help.github.com/"
],
"headers": {
"Accept-Encoding": "zstd, br, gzip, deflate"
}
}
]
}
CHANGELOG.md (356 changed lines)
@@ -1,250 +1,6 @@
# Changelog

Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).

## v0.34.24

*Nov 22, 2022*

Apart from one minor bug fix, this release aims to optimize the output of the
RPC (both HTTP and WebSocket endpoints). See our [upgrading
guidelines](./UPGRADING.md#v03424) for more details.

### IMPROVEMENTS

- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
useless whitespace in RPC output (@adizere, @thanethomson)

### BUG FIXES

- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
`Cache-Control` header response from `/check_tx` endpoint (@JayT106)

## v0.34.23

*Nov 9, 2022*

This release introduces some new Prometheus metrics to help in determining what
kinds of messages are consuming the most P2P bandwidth. This builds towards our
broader goal of optimizing Tendermint bandwidth consumption, and will give us
meaningful insights once we can establish these metrics for a number of chains.

We now also return `Cache-Control` headers for select RPC endpoints to help
facilitate caching.

Special thanks to external contributors on this release: @JayT106

### IMPROVEMENTS
- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
Envelope type and associated methods for sending and receiving Envelopes
instead of raw bytes. This also adds new metrics,
`tendermint_p2p_message_send_bytes_total` and
`tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
each message type have been sent.
- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
caching of RPC responses (@JayT106)

The following RPC endpoints will return `Cache-Control` headers with a maximum
age of 1 day:

- `/abci_info`
- `/block`, if `height` is supplied
- `/block_by_hash`
- `/block_results`, if `height` is supplied
- `/blockchain`
- `/check_tx`
- `/commit`, if `height` is supplied
- `/consensus_params`, if `height` is supplied
- `/genesis`
- `/genesis_chunked`
- `/tx`
- `/validators`, if `height` is supplied

## v0.34.22

This release includes several bug fixes, [one of
which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
building up a baseline for v0.34 against which to compare our upcoming v0.37
release during our [QA process](./docs/qa/).

Special thanks to external contributors on this release: @RiccardoM

### FEATURES

- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)

### BUG FIXES

- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
Calling `tendermint init` would incorrectly leave out the new `[storage]`
section delimiter in the generated configuration file - this has now been
fixed
- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
peers who have errored being added to the peer set (@jmalicevic)
- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
bug that caused the psql indexer to index empty blocks whenever one of the
transactions returned a non zero code. The relevant deduplication logic has
been moved within the kv indexer only (@cmwaters)
- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
block sync stall was observed during our QA process whereby the node was
unable to make progress. Retrying block requests after a timeout fixes this.

## v0.34.21

Release highlights include:

- A new `[storage]` configuration section and flag `discard_abci_responses`,
which, if enabled, discards all ABCI responses except the latest one in order
to reduce disk space usage in the state store. When enabled, the
`block_results` RPC endpoint can no longer function and will return an error.
- A new CLI command, `reindex-event`, to re-index block and tx events to the
event sinks. You can run this command when the event store backend
dropped/disconnected or you want to replace the backend. When
`discard_abci_responses` is enabled, you will not be able to use this command.

Special thanks to external contributors on this release: @rootwarp & @animart

### FEATURES

- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters)
- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart)

### IMPROVEMENTS

- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI
responses except the last in order to save on storage space in the state
store (@samricotta)

### BUG FIXES

- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the
priority mempool
- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) fix unsafe-reset-all for working with home path (@rootwarp)

## v0.34.20

Special thanks to external contributors on this release: @joeabbey @yihuang

This release introduces a prioritized mempool. Further notes can be found in UPGRADING.md.

NOTE: There's a known issue when combining the prioritized mempool with the ABCI socket client, that the team are curently working to resolve. Read more about the issue [here](https://github.com/tendermint/tendermint/pull/9030).

### BUG FIXES

- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
- [indexer] [#8625](https://github.com/tendermint/tendermint/pull/8625) Fix overriding tx index of duplicated txs. (@yihuang)
- [mempool] [\#8962](https://github.com/tendermint/tendermint/issues/8962) Backport priority mempool fixes from v0.35.x to v0.34.x (@creachadair).

### FEATURES

- [cli] [\#8674] Add command to force compact goleveldb databases (@cmwaters)
- [mempool] [\#8695] Port back the priority mempool. (@alexanderbez, @jmalicevic, @cmwaters)

### IMPROVEMENTS

- [logging] [\#8845](https://github.com/tendermint/tendermint/issues/8845) Add "Lazy" Stringers to defer Sprintf and Hash until logs print. (@joeabbey)

## v0.34.19

### BUG FIXES

- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).

## v0.34.18

### BREAKING CHANGES

- CLI/RPC/Config
- [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic

## v0.34.17

### BREAKING CHANGES

- CLI/RPC/Config

- [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).

### BUG FIXES

- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).

## v0.34.16

Special thanks to external contributors on this release: @yihuang

### BUG FIXES

- [consensus] [\#7617](https://github.com/tendermint/tendermint/issues/7617) calculate prevote message delay metric (backport #7551) (@williambanfield).
- [consensus] [\#7631](https://github.com/tendermint/tendermint/issues/7631) check proposal non-nil in prevote message delay metric (backport #7625) (@williambanfield).
- [statesync] [\#7885](https://github.com/tendermint/tendermint/issues/7885) statesync: assert app version matches (backport #7856) (@cmwaters).
- [statesync] [\#7881](https://github.com/tendermint/tendermint/issues/7881) fix app hash in state rollback (backport #7837) (@cmwaters).
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang).

## v0.34.15

Special thanks to external contributors on this release: @thanethomson

### BUG FIXES

- [\#7368](https://github.com/tendermint/tendermint/issues/7368) cmd: add integration test for rollback functionality (@cmwaters).
- [\#7309](https://github.com/tendermint/tendermint/issues/7309) pubsub: Report a non-nil error when shutting down (fixes #7306).
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).

### IMPROVEMENTS

- [config] [\#7230](https://github.com/tendermint/tendermint/issues/7230) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).

## v0.34.14

This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.

### FEATURES

- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in suppport for running the end-to-end test application (with state sync support) (@cmwaters).
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state. This may be useful in the event of non-determinstic app hash or when reverting an upgrade. @cmwaters

### IMPROVEMENTS

- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)

### BUG FIXES

- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).

## v0.34.13

This release backports improvements to state synchronization and ABCI
performance under concurrent load, and the PostgreSQL event indexer.

### IMPROVEMENTS

- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters)
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish)
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair)

## v0.34.12

Special thanks to external contributors on this release: @JayT106.

### FEATURES

- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
`/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)

### IMPROVEMENTS

- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)

### BUG FIXES

- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

## v0.34.11
@@ -256,25 +12,25 @@ adding two new parameters to the state sync config.
### BREAKING CHANGES

- Apps
- [Version] [\#6494](https://github.com/tendermint/tendermint/issues/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.
- [Version] \#6494 `TMCoreSemVer` is not required to be set as a ldflag any longer.

### IMPROVEMENTS

- [statesync] [\#6566](https://github.com/tendermint/tendermint/issues/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] [\#6378](https://github.com/tendermint/tendermint/issues/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] [\#6582](https://github.com/tendermint/tendermint/issues/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] \#6582 Increase chunk priority and add multiple retry chunk requests (@cmwaters)

### BUG FIXES

- [evidence] [\#6375](https://github.com/tendermint/tendermint/issues/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)

## v0.34.10

*April 14, 2021*

This release fixes a bug where peers would sometimes try to send messages
This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!
this issue!

- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
@@ -283,7 +39,7 @@ this issue!

*April 8, 2021*

This release fixes a moderate severity security issue, Security Advisory Alderfly,
This release fixes a moderate severity security issue, Security Advisory Alderfly,
which impacts all networks that rely on Tendermint light clients.
Further details will be released once networks have upgraded.
@@ -356,17 +112,17 @@ shout-out to @marbar3778 for diagnosing it quickly.

## v0.34.6

*February 18, 2021*
*February 18, 2021*

_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to build tooling problems._
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._

## v0.34.4

*February 11, 2021*

This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!

Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!

@@ -376,18 +132,17 @@ Special thanks to other external contributors on this release: @yayajacky, @odid
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)

## v0.34.3
## v0.34.3

*January 19, 2021*

This release includes a fix for a high-severity security vulnerability,
This release includes a fix for a high-severity security vulnerability,
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.

Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.

### BUG FIXES
@@ -479,14 +234,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
- `MaxBatchBytes` new config setting defines the max size of one batch.
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)

- Blockchain Protocol

- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supercedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
@@ -545,7 +300,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
@@ -558,7 +313,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters)
- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker)
- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker)
- [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes)
- [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters)
@@ -572,7 +327,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes)
- [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters)
- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)

### IMPROVEMENTS

@@ -583,7 +338,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
@@ -652,7 +407,7 @@ This security release fixes:
Tendermint 0.33.0 and above allow block proposers to include signatures for the
wrong block. This may happen naturally if you start a network, have it run for
some time and restart it **without changing the chainID**. (It is a
[misconfiguration](https://docs.tendermint.com/v0.33/tendermint-core/using-tendermint.html)
[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html)
to reuse chainIDs.) Correct block proposers will accidentally include signatures
for the wrong block if they see these signatures, and then commits won't validate,
making all proposed blocks invalid. A malicious validator (even with a minimal
@@ -951,7 +706,7 @@ and a validator address plus a timestamp. Note we may remove the validator
address & timestamp fields in the future (see ADR-25).

`lite2` package has been added to solve `lite` issues and introduce weak
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/tree/main/spec/consensus/light-client) for complete details.
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
`lite` package is now deprecated and will be removed in v0.34 release.

### BREAKING CHANGES:
@@ -1311,8 +1066,8 @@ Special thanks to external contributors on this release: @jon-certik, @gracenoah

*August 28, 2019*

@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://docs.tendermint.com/v0.34/tutorials/java.html)
@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
guide.

Special thanks to external contributors on this release:
@@ -1345,7 +1100,7 @@ Special thanks to external contributors on this release:

### FEATURES:

- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/main/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-043-blockchain-riri-org.md)
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for a production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable(@bluele)
- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using [`CustomReactors`
option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
@@ -1403,7 +1158,7 @@ This release contains a minor enhancement to the ABCI and some breaking changes

- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Prevent "sent next PEX request too soon" errors by not calling
ensurePeers outside of ensurePeersRoutine
- [behavior] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
- [behaviour] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
- [config] [\#3723](https://github.com/tendermint/tendermint/issues/3723) Add consensus_params to testnet config generation; document time_iota_ms (@ashleyvega)

@@ -1522,6 +1277,7 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -1541,6 +1297,7 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

### BREAKING CHANGES:

- Go API
@@ -1567,6 +1324,7 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting
@@ -1582,6 +1340,7 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer
@@ -1662,7 +1421,7 @@ Special thanks to external contributors on this release:
- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
* Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
use `make build_c` / `make install_c` (full instructions can be found at
<https://docs.tendermint.com>)
https://docs.tendermint.com/master/introduction/install.html#compile-with-cleveldb-support)
* Use `boltdb` tag to compile Tendermint with bolt db
- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
when IP lookup fails)
@@ -1723,7 +1482,7 @@ It brings back `NetAddress()` to `NodeInfo` and uses it instead of `SocketAddr`
Additionally, it improves response time on the `/validators` or `/status` RPC endpoints.
As a side-effect it makes these RPC endpoint more difficult to DoS and fixes a performance degradation in `ExecCommitBlock`.
Also, it contains an [ADR](https://github.com/tendermint/tendermint/pull/3539) that proposes decoupling the
responsibility for peer behavior from the `p2p.Switch` (by @brapse).
responsibility for peer behaviour from the `p2p.Switch` (by @brapse).

Special thanks to external contributors on this release:
@brapse, @guagualvcha, @mydring
@@ -1876,6 +1635,7 @@ See the [v0.31.0
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
more details.

### BREAKING CHANGES:

* CLI/RPC/Config
@@ -1886,7 +1646,7 @@ more details.
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.

* Apps
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -1939,7 +1699,7 @@ more details.
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
(@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -2006,7 +1766,7 @@ For more, see issues marked

This release also includes a fix to prevent Tendermint from including the same
piece of evidence in more than one block. This issue was reported by @chengwenxi in our
[bug bounty program](https://hackerone.com/cosmos).
[bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -2095,6 +1855,7 @@ This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.

### IMPROVEMENTS:
|
||||
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once
|
||||
- [rpc] [\#3159](https://github.com/tendermint/tendermint/issues/3159) Expose
|
||||
@@ -2133,6 +1894,7 @@ While we are trying to stabilize the Block protocol to preserve compatibility
|
||||
with old chains, there may be some final changes yet to come before Cosmos
|
||||
launch as we continue to audit and test the software.
|
||||
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
@@ -2180,6 +1942,7 @@ launch as we continue to audit and test the software.
|
||||
Special thanks to external contributors on this release:
|
||||
@HaoyangLiu
|
||||
|
||||
|
||||
### BUG FIXES:
|
||||
- [consensus] Fix consensus halt from proposing blocks with too much evidence
|
||||
|
||||
@@ -2232,7 +1995,7 @@ See [UPGRADING.md](UPGRADING.md) for more details.
|
||||
|
||||
- [build] [\#3085](https://github.com/tendermint/tendermint/issues/3085) Fix `Version` field in build scripts (@husio)
|
||||
- [crypto/multisig] [\#3102](https://github.com/tendermint/tendermint/issues/3102) Fix multisig keys address length
|
||||
- [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshalling into `crypto.PubKey` interface
|
||||
- [crypto/encoding] [\#3101](https://github.com/tendermint/tendermint/issues/3101) Fix `PubKeyMultisigThreshold` unmarshaling into `crypto.PubKey` interface
|
||||
- [p2p/conn] [\#3111](https://github.com/tendermint/tendermint/issues/3111) Make SecretConnection thread safe
|
||||
- [rpc] [\#3053](https://github.com/tendermint/tendermint/issues/3053) Fix internal error in `/tx_search` when results are empty
|
||||
(@gianfelipe93)
|
||||
@@ -2307,6 +2070,7 @@ Special thanks to @dlguddus for discovering a [major
|
||||
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
|
||||
in the proposer selection algorithm.
|
||||
|
||||
|
||||
This release is primarily about fixes to the proposer selection algorithm
|
||||
in preparation for the [Cosmos Game of
|
||||
Stakes](https://blog.cosmos.network/the-game-of-stakes-is-open-for-registration-83a404746ee6).
|
||||
@@ -2368,6 +2132,7 @@ Special thanks to external contributors on this release:
|
||||
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
|
||||
@nagarajmanjunath, @tomtau
|
||||
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [rpc] [\#2747](https://github.com/tendermint/tendermint/issues/2747) Enable subscription to tags emitted from `BeginBlock`/`EndBlock` (@kostko)
|
||||
@@ -2391,8 +2156,8 @@ Special thanks to external contributors on this release:
|
||||
- [blockchain] [\#2731](https://github.com/tendermint/tendermint/issues/2731) Retry both blocks if either is bad to avoid getting stuck during fast sync (@goolAdapter)
|
||||
- [consensus] [\#2893](https://github.com/tendermint/tendermint/issues/2893) Use genDoc.Validators instead of state.NextValidators on replay when appHeight==0 (@james-ray)
|
||||
- [log] [\#2868](https://github.com/tendermint/tendermint/issues/2868) Fix `module=main` setting overriding all others
|
||||
- NOTE: this changes the default logging behavior to be much less verbose.
|
||||
Set `log_level="info"` to restore the previous behavior.
|
||||
- NOTE: this changes the default logging behaviour to be much less verbose.
|
||||
Set `log_level="info"` to restore the previous behaviour.
|
||||
- [rpc] [\#2808](https://github.com/tendermint/tendermint/issues/2808) Fix `accum` field in `/validators` by calling `IncrementAccum` if necessary
|
||||
- [rpc] [\#2811](https://github.com/tendermint/tendermint/issues/2811) Allow integer IDs in JSON-RPC requests (@tomtau)
|
||||
- [txindex/kv] [\#2759](https://github.com/tendermint/tendermint/issues/2759) Fix tx.height range queries
|
||||
@@ -2406,6 +2171,7 @@ Special thanks to external contributors on this release:
|
||||
Special thanks to external contributors on this release:
|
||||
@danil-lashin, @kevlubkcm, @krhubert, @srmo
|
||||
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* Go API
|
||||
@@ -2493,7 +2259,7 @@ Special thanks to external contributors on this release:
|
||||
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu.
|
||||
|
||||
Special thanks to @Slamper for a series of bug reports in our [bug bounty
|
||||
program](https://hackerone.com/cosmos) which are fixed in this release.
|
||||
program](https://hackerone.com/tendermint) which are fixed in this release.
|
||||
|
||||
This release is primarily about adding Version fields to various data structures,
|
||||
optimizing consensus messages for signing and verification in
|
||||
@@ -2523,7 +2289,7 @@ increasing attention to backwards compatibility. Thanks for bearing with us!
|
||||
* [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as
|
||||
encoded on disk.
|
||||
* [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default
|
||||
behavior to `prove=false`
|
||||
behaviour to `prove=false`
|
||||
* [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and
|
||||
`/net_info`
|
||||
* [rpc] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Remove
|
||||
@@ -2637,7 +2403,7 @@ Special thanks to external contributors on this release:
|
||||
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
|
||||
It also addresses some issues found via security audit, removes various unused
|
||||
functions from `libs/common`, and implements
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-012-peer-transport.md).
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
|
||||
|
||||
BREAKING CHANGES:
|
||||
|
||||
@@ -2702,7 +2468,7 @@ are affected by a change.
|
||||
|
||||
A few more breaking changes are in the works - each will come with a clear
|
||||
Architecture Decision Record (ADR) explaining the change. You can review ADRs
|
||||
[here](https://github.com/tendermint/tendermint/tree/main/docs/architecture)
|
||||
[here](https://github.com/tendermint/tendermint/tree/develop/docs/architecture)
|
||||
or in the [open Pull Requests](https://github.com/tendermint/tendermint/pulls).
|
||||
You can also check in on the [issues marked as
|
||||
breaking](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3Abreaking).
|
||||
@@ -2718,7 +2484,7 @@ BREAKING CHANGES:
|
||||
- [abci] Added address of the original proposer of the block to Header
|
||||
- [abci] Change ABCI Header to match Tendermint exactly
|
||||
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
|
||||
[ADR-018](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-018-ABCI-Validators.md)):
|
||||
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
|
||||
- Remove PubKey from `Validator` (so it's just Address and Power)
|
||||
- Introduce `ValidatorUpdate` (with just PubKey and Power)
|
||||
- InitChain and EndBlock use ValidatorUpdate
|
||||
@@ -2740,7 +2506,7 @@ BREAKING CHANGES:
|
||||
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
|
||||
- Add NextValidatorSet to State, changes on-disk representation of state
|
||||
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
|
||||
[ADR-020](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-020-block-size.md)).
|
||||
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
|
||||
- Remove ConsensusParams.BlockSize.MaxTxs
|
||||
- Introduce maximum sizes for all components of a block, including ChainID
|
||||
- [types] Updates to the block Header:
|
||||
@@ -2751,7 +2517,7 @@ BREAKING CHANGES:
|
||||
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
|
||||
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
|
||||
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
|
||||
[ADR-014](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-014-secp-malleability.md)):
|
||||
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
|
||||
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
|
||||
- malleability removed by requiring `s` to be in canonical form.
|
||||
|
||||
@@ -2981,7 +2747,7 @@ BREAKING CHANGES:
|
||||
FEATURES
|
||||
- [cmd] Added metrics (served under `/metrics` using a Prometheus client;
|
||||
disabled by default). See the new `instrumentation` section in the config and
|
||||
[metrics](https://github.com/tendermint/tendermint/blob/main/docs/tendermint-core/metrics.md)
|
||||
[metrics](https://tendermint.readthedocs.io/projects/tools/en/develop/metrics.html)
|
||||
guide.
|
||||
- [p2p] Add IPv6 support to peering.
|
||||
- [p2p] Add `external_address` to config to allow specifying the address for
|
||||
@@ -3095,7 +2861,7 @@ BREAKING:
|
||||
|
||||
FEATURES
|
||||
|
||||
- [rpc] the RPC documentation is now published to https://github.com/tendermint/tendermint/tree/main/spec/rpc
|
||||
- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate
|
||||
- [p2p] AllowDuplicateIP config option to refuse connections from same IP.
|
||||
- true by default for now, false by default in next breaking release
|
||||
- [docs] Add docs for query, tx indexing, events, pubsub
|
||||
@@ -3551,7 +3317,7 @@ Also includes the Grand Repo-Merge of 2017.
|
||||
BREAKING CHANGES:
|
||||
|
||||
- Config and Flags:
|
||||
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/main/config/config.go#L11),
|
||||
- The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
|
||||
containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
|
||||
- This affects the following flags:
|
||||
- `--seeds` is now `--p2p.seeds`
|
||||
@@ -3574,7 +3340,7 @@ containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusCon
|
||||
|
||||
- Logger
|
||||
- Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`.
|
||||
See our new [logging library](https://github.com/tendermint/tendermint/blob/main/libs/log/logger.go) and [blog post](https://blog.cosmos.network/abstracting-the-logger-interface-in-go-4cf96bf90bb7) for more details
|
||||
See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details
|
||||
- Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!)
|
||||
- Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger
|
||||
|
||||
|
||||
@@ -1,110 +1,160 @@
|
||||
# Unreleased Changes
|
||||
|
||||
## v0.38.0
|
||||
## vX.X
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
|
||||
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
|
||||
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
|
||||
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
|
||||
- [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
|
||||
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
|
||||
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
|
||||
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
|
||||
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
|
||||
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
|
||||
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
|
||||
- [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
|
||||
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
|
||||
- [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0
|
||||
|
||||
- Apps
|
||||
- [abci] \#9468 Introduce `FinalizeBlock` which condenses `BeginBlock`, `DeliverTx` and `EndBlock`
|
||||
into a single method call (@cmwaters)
|
||||
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
||||
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
|
||||
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
|
||||
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
|
||||
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
|
||||
- It is no longer required to set ldflags to set version strings
|
||||
- [abci/counter] \#6684 Delete counter example app
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
- Go API
|
||||
- [p2p] \#9625 Remove unused p2p/trust package (@cmwaters)
|
||||
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
|
||||
- [p2p] \#6618 Move `p2p.NodeInfo` into `types` to support use of the SDK. (@tychoish)
|
||||
- [p2p] \#6583 Make `p2p.NodeID` and `p2p.NetAddress` exported types to support their use in the RPC layer. (@tychoish)
|
||||
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
|
||||
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
|
||||
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
|
||||
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
|
||||
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
|
||||
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
|
||||
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
|
||||
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
|
||||
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
|
||||
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
|
||||
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
|
||||
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
|
||||
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
|
||||
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
|
||||
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
|
||||
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
|
||||
- [all] \#6077 Change spelling from British English to American (@cmwaters)
|
||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
|
||||
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
|
||||
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
|
||||
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
|
||||
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
|
||||
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
|
||||
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
|
||||
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
|
||||
and `TxInfo`. (@alexanderbez)
|
||||
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
||||
- [types] \#6627 Move `NodeKey` to types to make the type public.
|
||||
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID`
|
||||
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync`
|
||||
(@cmwaters)
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
- Data Storage
|
||||
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
|
||||
- [mempool] \#6396 Remove mempool's write ahead log (WAL), which was previously unused by the tendermint code. (@tychoish)
|
||||
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
|
||||
|
||||
- Tooling
|
||||
- [tools/tm-signer-harness] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)
|
||||
- [metrics] \#9682 move state-syncing and block-syncing metrics to their respective packages (@cmwaters)
|
||||
labels have moved from block_syncing -> blocksync_syncing and state_syncing -> statesync_syncing
|
||||
- [tools] \#6498 Use the OS home dir instead of the hardcoded PATH. (@JayT106)
|
||||
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [config] \#9680 Introduce `BootstrapPeers` to the config to allow nodes to list peers to be added to
|
||||
the address book on startup (@cmwaters)
|
||||
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
|
||||
- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
|
||||
- [pex] \#6305 v2 pex reactor with backwards compatibility. Introduces two new pex messages to
|
||||
accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer
|
||||
exchange reactors behave the same. (@cmwaters)
|
||||
- [crypto] \#6376 Enable sr25519 as a validator key
|
||||
- [mempool] \#6466 Introduction of a prioritized mempool (see the sketch after this list). (@alexanderbez)
|
||||
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
|
||||
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
|
||||
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
|
||||
`mempool.version` configuration, where `v1` is the default configuration.
|
||||
- Applications that do not specify a priority, i.e. zero, will have their transactions reaped in the order in which they are received by the node.
|
||||
- Transactions are gossiped in FIFO order as they are in `v0`.
|
||||
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
|
||||
- [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106)
|
||||
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
|
||||
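As a rough sketch of the prioritized mempool entry above (assuming the v0.35+ Go ABCI types; the `PrioApp` type and the `parseFeeAndSender` helper are hypothetical, not part of Tendermint), an application might populate the new fields in `CheckTx` like this:

```go
package sketch

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// PrioApp is a toy application; it embeds BaseApplication so only CheckTx
// needs to be overridden for this illustration.
type PrioApp struct {
	abci.BaseApplication
}

// CheckTx sets a priority and a sender index so the v1 mempool can reap
// higher-priority transactions first.
func (app *PrioApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	fee, sender := parseFeeAndSender(req.Tx) // hypothetical app-specific decoding
	return abci.ResponseCheckTx{
		Code:     abci.CodeTypeOK,
		Priority: fee,    // higher values are reaped first by the v1 mempool
		Sender:   sender, // used by the mempool as an index
	}
}

// parseFeeAndSender stands in for application-specific tx decoding.
func parseFeeAndSender(tx []byte) (int64, string) {
	return int64(len(tx)), "sender-address" // placeholder logic only
}
```

Operators would then select the reactor via the `mempool.version` config setting, as noted in the entry above.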
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
|
||||
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
|
||||
- [crypto/merkle] \#6443 & \#6513 Improve HashAlternatives performance (@cuonglm, @marbar3778)
|
||||
- [rpc] \#9650 Enable caching of RPC responses (@JayT106)
|
||||
- [consensus] \#9760 Save peer LastCommit correctly to achieve 50% reduction in gossiped precommits. (@williambanfield)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [docker] \#9462 ensure Docker image uses consistent version of Go
|
||||
- [abci-cli] \#9717 fix broken abci-cli help command
|
||||
|
||||
## v0.37.0
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [config] \#9259 Rename the `fastsync` section and the `fast_sync` key to `blocksync` and `block_sync` respectively
|
||||
|
||||
- Apps
|
||||
- [abci/counter] \#6684 Delete counter example app
|
||||
- [abci] \#5783 Make length delimiter encoding consistent (`uint64`) between ABCI and P2P wire-level protocols
|
||||
- [abci] \#9145 Removes unused Response/Request `SetOption` from ABCI (@samricotta)
|
||||
- [abci/params] \#9287 Deduplicate `ConsensusParams` and `BlockParams` so only `types` proto definitions are used (@cmwaters)
|
||||
- Remove `TimeIotaMs` and use a hard-coded 1 millisecond value to ensure monotonically increasing block times.
|
||||
- Rename `AppVersion` to `App` so as to not stutter.
|
||||
- [types] \#9287 Reduce the use of protobuf types in core logic. (@cmwaters)
|
||||
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams` have become native types.
|
||||
They still utilize protobuf when being sent over the wire or written to disk.
|
||||
- Moved `ValidateConsensusParams` inside (now native type) `ConsensusParams`, and renamed it to `ValidateBasic`.
|
||||
- [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal`, which give the app control over which transactions are proposed and allow for verification of proposed blocks.
|
||||
- [abci] \#8216 Renamed `EvidenceType` to `MisbehaviorType` and `Evidence` to `Misbehavior` as a more accurate label of their contents. (@williambanfield, @sergio-mena)
|
||||
- [abci] \#9122 Renamed `LastCommitInfo` to `CommitInfo` in preparation for vote extensions. (@cmwaters)
|
||||
- [abci] \#8656, \#8901 Added cli commands for `PrepareProposal` and `ProcessProposal`. (@jmalicevic, @hvanz)
|
||||
- [abci] \#6403 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
- Go API
|
||||
- [all] \#9144 Change spelling from British English to American (@cmwaters)
|
||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [abci] \#9301 New ABCI methods `PrepareProposal` and `ProcessProposal`, which give the app control over which transactions are proposed and allow for verification of proposed blocks (see the sketch below).
|
||||
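As a minimal sketch of these two methods (assuming the v0.37-era Go ABCI types; the `ProposalApp` type is hypothetical, and signatures may differ between releases), the simplest possible application-side handlers echo the suggested transactions and accept every proposal:

```go
package sketch

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// ProposalApp shows the minimal, no-op use of the new proposal hooks.
type ProposalApp struct {
	abci.BaseApplication
}

// PrepareProposal returns the transactions suggested by Tendermint unchanged;
// a real application could reorder, add, or drop transactions here.
func (app *ProposalApp) PrepareProposal(req abci.RequestPrepareProposal) abci.ResponsePrepareProposal {
	return abci.ResponsePrepareProposal{Txs: req.Txs}
}

// ProcessProposal accepts every proposed block; a real application would
// verify req.Txs and return a reject status for invalid proposals.
func (app *ProposalApp) ProcessProposal(req abci.RequestProcessProposal) abci.ResponseProcessProposal {
	return abci.ResponseProcessProposal{Status: abci.ResponseProcessProposal_ACCEPT}
}
```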
|
||||
### IMPROVEMENTS
|
||||
- [crypto] \#9250 Update to use btcec v2 and the latest btcutil. (@wcsiu)
|
||||
|
||||
- [cli] \#9171 add a `--hard` flag to the rollback command (and a boolean to the `RollbackState` method). This will roll back
|
||||
state and remove the last block. This command can be triggered multiple times. The application must also roll back
|
||||
state to the same height. (@tsutsu, @cmwaters)
|
||||
- [proto] \#9356 Migrate from `gogo/protobuf` to `cosmos/gogoproto` (@julienrbrt)
|
||||
- [rpc] \#9276 Added `header` and `header_by_hash` queries to the RPC client (@samricotta)
|
||||
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
|
||||
|
||||
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
|
||||
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
|
||||
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
|
||||
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
|
||||
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
|
||||
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778 & @Yawning)
|
||||
- [types] \#6120 use batch verification for verifying commits signatures. (@marbar3778 & @cmwaters & @Yawning)
|
||||
- If the key type supports the batch verification API, it will try to batch verify. If batch verification fails, each signature is verified individually (see the sketch after this list).
|
||||
- [state] \#9505 Added logic so that when pruning, the evidence period is taken into consideration and only unnecessary data is deleted (@samricotta)
|
||||
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
|
||||
- [privval] \#5725 Add gRPC support to private validator.
|
||||
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
|
||||
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
|
||||
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
|
||||
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
|
||||
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
|
||||
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
|
||||
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)
|
||||
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
|
||||
- [consensus] \#5987 Remove `time_iota_ms` from consensus params. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778)
|
||||
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
|
||||
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
|
||||
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
|
||||
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
|
||||
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
|
||||
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
|
||||
- [types] \#6120 use batch verification for verifying commits signatures.
|
||||
- If the key type supports the batch verification API, it will try to batch verify. If batch verification fails, each signature is verified individually.
|
||||
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
|
||||
- [privval] \#6240 Add `context.Context` to privval interface.
|
||||
- [rpc] \#6265 set cache control in http-rpc response header (@JayT106)
|
||||
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
|
||||
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
|
||||
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
|
||||
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
|
||||
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
|
||||
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
|
||||
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
|
||||
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
|
||||
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
|
||||
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
|
||||
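To make the batch-verification fallback noted earlier in this list concrete, here is a self-contained sketch of the control flow only; the `batchVerifier` interface below is a simplified stand-in for illustration, not the actual tendermint crypto API:

```go
package sketch

import "fmt"

// batchVerifier is a simplified stand-in for a batch-verification API, used
// only to illustrate the "try batch, then fall back" flow described above.
type batchVerifier interface {
	Add(pubKey, msg, sig []byte) error
	Verify() bool
}

// sigItem bundles one signature to check.
type sigItem struct {
	pubKey, msg, sig []byte
}

// verifyCommitSigs first attempts a single batched verification and, if that
// fails (or no batch verifier is available), verifies each signature
// individually so the offending one can be reported.
func verifyCommitSigs(bv batchVerifier, items []sigItem, single func(pub, msg, sig []byte) bool) error {
	batchOK := bv != nil
	if batchOK {
		for _, it := range items {
			if err := bv.Add(it.pubKey, it.msg, it.sig); err != nil {
				batchOK = false
				break
			}
		}
	}
	if batchOK && bv.Verify() {
		return nil // the whole commit verified in one batch
	}
	// Fallback: verify one signature at a time.
	for i, it := range items {
		if !single(it.pubKey, it.msg, it.sig) {
			return fmt.Errorf("signature %d failed verification", i)
		}
	}
	return nil
}
```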
|
||||
### BUG FIXES
|
||||
|
||||
- [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
|
||||
- [docker] \#9073 enable cross platform build using docker buildx
|
||||
- [blocksync] \#9518 handle the case when the sending queue is full: retry block request after a timeout
|
||||
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
|
||||
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
|
||||
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
|
||||
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
|
||||
- [rpc] \#6507 Fix RPC client not handling URLs without ports (@JayT106)
|
||||
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
|
||||
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
|
||||
- [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman)
|
||||
- [light] \#6687 Fix bug with incorrectly handled contexts in the light client (@cmwaters)
|
||||
|
||||
@@ -1,109 +1,59 @@
|
||||
# The Tendermint Code of Conduct
|
||||
|
||||
This code of conduct applies to all projects run by the Tendermint/COSMOS team
|
||||
and hence to Tendermint.
|
||||
This code of conduct applies to all projects run by the Tendermint/COSMOS team and hence to tendermint.
|
||||
|
||||
|
||||
----
|
||||
|
||||
|
||||
# Conduct
|
||||
|
||||
## Contact: conduct@tendermint.com
|
||||
|
||||
* We are committed to providing a friendly, safe and welcoming environment for
|
||||
all, regardless of level of experience, gender, gender identity and
|
||||
expression, sexual orientation, disability, personal appearance, body size,
|
||||
race, ethnicity, age, religion, nationality, or other similar characteristics.
|
||||
* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
|
||||
|
||||
* On Slack, please avoid using overtly sexual nicknames or other nicknames that
|
||||
might detract from a friendly, safe and welcoming environment for all.
|
||||
* On Slack, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
|
||||
|
||||
* Please be kind and courteous. There’s no need to be mean or rude.
|
||||
|
||||
* Respect that people have differences of opinion and that every design or
|
||||
implementation choice carries a trade-off and numerous costs. There is seldom
|
||||
a right answer.
|
||||
* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
|
||||
|
||||
* Please keep unstructured critique to a minimum. If you have solid ideas you
|
||||
want to experiment with, make a fork and see how it works.
|
||||
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
|
||||
|
||||
* We will exclude you from interaction if you insult, demean or harass anyone.
|
||||
That is not welcome behavior. We interpret the term “harassment” as including
|
||||
the definition in the [Citizen Code of Conduct][ccoc]; if you have any lack of
|
||||
clarity about what might be included in that concept, please read their
|
||||
definition. In particular, we don’t tolerate behavior that excludes people in
|
||||
socially marginalized groups.
|
||||
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
|
||||
|
||||
* Private harassment is also unacceptable. No matter who you are, if you feel
|
||||
you have been or are being harassed or made uncomfortable by a community
|
||||
member, please contact one of the channel admins or the person mentioned above
|
||||
immediately. Whether you’re a regular contributor or a newcomer, we care about
|
||||
making this community a safe place for you and we’ve got your back.
|
||||
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back.
|
||||
|
||||
* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome.
|
||||
|
||||
* Likewise any spamming, trolling, flaming, baiting or other attention-stealing
|
||||
behavior is not welcome.
|
||||
|
||||
----
|
||||
|
||||
|
||||
# Moderation
|
||||
|
||||
These are the policies for upholding our community’s standards of conduct. If
|
||||
you feel that a thread needs moderation, please contact the above mentioned
|
||||
person.
|
||||
These are the policies for upholding our community’s standards of conduct. If you feel that a thread needs moderation, please contact the above mentioned person.
|
||||
|
||||
1. Remarks that violate the Tendermint/COSMOS standards of conduct, including
|
||||
hateful, hurtful, oppressive, or exclusionary remarks, are not allowed.
|
||||
(Cursing is allowed, but never targeting another user, and never in a hateful
|
||||
manner.)
|
||||
1. Remarks that violate the Tendermint/COSMOS standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
|
||||
|
||||
2. Remarks that moderators find inappropriate, whether listed in the code of
|
||||
conduct or not, are also not allowed.
|
||||
2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
|
||||
|
||||
3. Moderators will first respond to such remarks with a warning.
|
||||
|
||||
4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of
|
||||
the communication channel to cool off.
|
||||
4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of the communication channel to cool off.
|
||||
|
||||
5. If the user comes back and continues to make trouble, they will be banned,
|
||||
i.e., indefinitely excluded.
|
||||
5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
|
||||
|
||||
6. Moderators may choose at their discretion to un-ban the user if it was a
|
||||
first offense and they offer the offended party a genuine apology.
|
||||
6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
|
||||
|
||||
7. If a moderator bans someone and you think it was unjustified, please take it
|
||||
up with that moderator, or with a different moderator, in private. Complaints
|
||||
about bans in-channel are not allowed.
|
||||
7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, in private. Complaints about bans in-channel are not allowed.
|
||||
|
||||
8. Moderators are held to a higher standard than other community members. If a
|
||||
moderator creates an inappropriate situation, they should expect less leeway
|
||||
than others.
|
||||
8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
|
||||
|
||||
In the Tendermint/COSMOS community we strive to go the extra step to look out
|
||||
for each other. Don’t just aim to be technically unimpeachable, try to be your
|
||||
best self. In particular, avoid flirting with offensive or sensitive issues,
|
||||
particularly if they’re off-topic; this all too often leads to unnecessary
|
||||
fights, hurt feelings, and damaged trust; worse, it can drive people away
|
||||
from the community entirely.
|
||||
In the Tendermint/COSMOS community we strive to go the extra step to look out for each other. Don’t just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they’re off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
|
||||
|
||||
And if someone takes issue with something you said or did, resist the urge to be
|
||||
defensive. Just stop doing what it was they complained about and apologize. Even
|
||||
if you feel you were misinterpreted or unfairly accused, chances are good there
|
||||
was something you could’ve communicated better — remember that it’s your
|
||||
responsibility to make your fellow Cosmonauts comfortable. Everyone wants to
|
||||
get along and we are all here first and foremost because we want to talk
|
||||
about cool technology. You will find that people will be eager to assume
|
||||
good intent and forgive as long as you earn their trust.
|
||||
And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could’ve communicated better — remember that it’s your responsibility to make your fellow Cosmonauts comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
|
||||
|
||||
The enforcement policies listed above apply to all official Tendermint/COSMOS
|
||||
venues. For other projects adopting the Tendermint/COSMOS Code of Conduct,
|
||||
please contact the maintainers of those projects for enforcement. If you wish to
|
||||
use this code of conduct for your own project, consider explicitly mentioning
|
||||
your moderation policy or making a copy with your own moderation policy so as to
|
||||
avoid confusion.
|
||||
The enforcement policies listed above apply to all official Tendermint/COSMOS venues.For other projects adopting the Tendermint/COSMOS Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion.
|
||||
|
||||
\*Adapted from the [Node.js Policy on Trolling][node-trolling-policy], the
|
||||
[Contributor Covenant v1.3.0][ccov] and the [Rust Code of Conduct][rust-coc].
|
||||
|
||||
[ccoc]: https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md
|
||||
[node-trolling-policy]: http://blog.izs.me/post/30036893703/policy-on-trolling
|
||||
[ccov]: http://contributor-covenant.org/version/1/3/0/
|
||||
[rust-coc]: https://www.rust-lang.org/en-US/conduct.html
|
||||
*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling), the [Contributor Covenant v1.3.0](http://contributor-covenant.org/version/1/3/0/) and the [Rust Code of Conduct](https://www.rust-lang.org/en-US/conduct.html).
|
||||
|
||||
CONTRIBUTING.md
@@ -7,12 +7,12 @@ support permissionless value-carrying networks. While all contributions are
|
||||
welcome, contributors should bear this goal in mind in deciding if they should
|
||||
target the main Tendermint project or a potential fork. When targeting the
|
||||
main Tendermint project, the following process leads to the best chance of
|
||||
landing changes in `main`.
|
||||
landing changes in master.
|
||||
|
||||
All work on the code base should be motivated by a [Github
|
||||
Issue](https://github.com/tendermint/tendermint/issues).
|
||||
[Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
|
||||
is a good place to start when looking for places to contribute. If you
|
||||
is a good place start when looking for places to contribute. If you
|
||||
would like to work on an issue which already exists, please indicate so
|
||||
by leaving a comment.
|
||||
|
||||
@@ -26,7 +26,7 @@ will indicate their support with a heartfelt emoji.
|
||||
|
||||
If the issue would benefit from thorough discussion, maintainers may
|
||||
request that you create a [Request For
|
||||
Comment](https://github.com/tendermint/tendermint/tree/main/docs/rfc)
|
||||
Comment](https://github.com/tendermint/spec/tree/master/rfc)
|
||||
in the Tendermint spec repo. Discussion
|
||||
at the RFC stage will build collective understanding of the dimensions
|
||||
of the problems and help structure conversations around trade-offs.
|
||||
@@ -46,7 +46,7 @@ Find the largest existing ADR number and bump it by 1.
|
||||
When the problem as well as proposed solution are well understood,
|
||||
changes should start with a [draft
|
||||
pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/)
|
||||
against `main`. The draft signals that work is underway. When the work
|
||||
against master. The draft signals that work is underway. When the work
|
||||
is ready for feedback, hitting "Ready for Review" will signal to the
|
||||
maintainers to take a look.
|
||||
|
||||
@@ -54,7 +54,7 @@ maintainers to take a look.
|
||||
|
||||
Each stage of the process is aimed at creating feedback cycles which align contributors and maintainers to make sure:
|
||||
|
||||
- Contributors don’t waste their time implementing/proposing features which won’t land in `main`.
|
||||
- Contributors don’t waste their time implementing/proposing features which won’t land in master.
|
||||
- Maintainers have the necessary context in order to support and review contributions.
|
||||
|
||||
## Forking
|
||||
@@ -73,19 +73,19 @@ For instance, to create a fork and work on a branch of it, I would:
|
||||
- `git remote add origin git@github.com:ebuchman/basecoin.git`
|
||||
|
||||
Now `origin` refers to my fork and `upstream` refers to the Tendermint version.
|
||||
So I can `git push -u origin main` to update my fork, and make pull requests to tendermint from there.
|
||||
So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there.
|
||||
Of course, replace `ebuchman` with your git handle.
|
||||
|
||||
To pull in updates from the origin repo, run
|
||||
|
||||
- `git fetch upstream`
|
||||
- `git rebase upstream/main` (or whatever branch you want)
|
||||
- `git rebase upstream/master` (or whatever branch you want)
|
||||
|
||||
## Dependencies
|
||||
|
||||
We use [go modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.
|
||||
|
||||
That said, the `main` branch of every Tendermint repository should just build
|
||||
That said, the master branch of every Tendermint repository should just build
|
||||
with `go get`, which means they should be kept up-to-date with their
|
||||
dependencies so we can get away with telling people they can just `go get` our
|
||||
software.
|
||||
@@ -105,33 +105,11 @@ specify exactly the dependency you want to update, eg.
|
||||
|
||||
## Protobuf
|
||||
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
|
||||
with [`gogoproto`](https://github.com/cosmos/gogoproto) to generate code for use
|
||||
across Tendermint Core.
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
|
||||
|
||||
To generate proto stubs, lint, and check protos for breaking changes, you will
|
||||
need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
|
||||
of the repository, run:
|
||||
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
|
||||
|
||||
```bash
|
||||
# Lint all of the .proto files in proto/tendermint
|
||||
make proto-lint
|
||||
|
||||
# Check if any of your local changes (prior to committing to the Git repository)
|
||||
# are breaking
|
||||
make proto-check-breaking
|
||||
|
||||
# Generate Go code from the .proto files in proto/tendermint
|
||||
make proto-gen
|
||||
```
|
||||
|
||||
To automatically format `.proto` files, you will need
|
||||
[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
|
||||
installed, you can run:
|
||||
|
||||
```bash
|
||||
make proto-format
|
||||
```
|
||||
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`.
|
||||
|
||||
### Visual Studio Code
|
||||
|
||||
@@ -142,6 +120,7 @@ If you are a VS Code user, you may want to add the following to your `.vscode/se
|
||||
"protoc": {
|
||||
"options": [
|
||||
"--proto_path=${workspaceRoot}/proto",
|
||||
"--proto_path=${workspaceRoot}/third_party/proto"
|
||||
]
|
||||
}
|
||||
}
|
||||
@@ -152,47 +131,10 @@ If you are a VS Code user, you may want to add the following to your `.vscode/se
|
||||
Every fix, improvement, feature, or breaking change should be made in a
|
||||
pull-request that includes an update to the `CHANGELOG_PENDING.md` file.
|
||||
|
||||
A feature can also be worked on a feature branch, if its size and/or risk
|
||||
justifies it (see #branching-model-and-release below).
|
||||
|
||||
### What does a good changelog entry look like?
|
||||
|
||||
Changelog entries should answer the question: "what is important about this
|
||||
change for users to know?" or "what problem does this solve for users?". It
|
||||
should not simply be a reiteration of the title of the associated PR, unless the
|
||||
title of the PR _very_ clearly explains the benefit of a change to a user.
|
||||
|
||||
Some good examples of changelog entry descriptions:
|
||||
|
||||
```md
|
||||
- [consensus] \#1111 Small transaction throughput improvement (approximately
|
||||
3-5\% from preliminary tests) through refactoring the way we use channels
|
||||
- [mempool] \#1112 Refactor Go API to be able to easily swap out the current
|
||||
mempool implementation in Tendermint forks
|
||||
- [p2p] \#1113 Automatically ban peers when their messages are unsolicited or
|
||||
are received too frequently
|
||||
```
|
||||
|
||||
Some bad examples of changelog entry descriptions:
|
||||
|
||||
```md
|
||||
- [consensus] \#1111 Refactor channel usage
|
||||
- [mempool] \#1112 Make API generic
|
||||
- [p2p] \#1113 Ban for PEX message abuse
|
||||
```
|
||||
|
||||
For more on how to write good changelog entries, see:
|
||||
|
||||
- <https://keepachangelog.com>
|
||||
- <https://docs.gitlab.com/ee/development/changelog.html#writing-good-changelog-entries>
|
||||
- <https://depfu.com/blog/what-makes-a-good-changelog>
|
||||
|
||||
### Changelog entry format
|
||||
|
||||
Changelog entries should be formatted as follows:
|
||||
|
||||
```md
|
||||
- [module] \#xxx Some description of the change (@contributor)
|
||||
- [module] \#xxx Some description about the change (@contributor)
|
||||
```
|
||||
|
||||
Here, `module` is the part of the code that changed (typically a
|
||||
@@ -213,46 +155,37 @@ Changes with multiple classifications should be doubly included (eg. a bug fix
|
||||
that is also a breaking change should be recorded under both).
|
||||
|
||||
Breaking changes are further subdivided according to the APIs/users they impact.
|
||||
Any change that affects multiple APIs/users should be recorded multiply - for
|
||||
Any change that effects multiple APIs/users should be recorded multiply - for
|
||||
instance, a change to the `Blockchain Protocol` that removes a field from the
|
||||
header should also be recorded under `CLI/RPC/Config` since the field will be
|
||||
removed from the header in RPC responses as well.
|
||||
|
||||
## Branching Model and Release
|
||||
|
||||
The main development branch is `main`.
|
||||
The main development branch is master.
|
||||
|
||||
Every release is maintained in a release branch named `vX.Y.Z`.
|
||||
|
||||
Pending minor releases have long-lived release candidate ("RC") branches. Minor release changes should be merged to these long-lived RC branches at the same time that the changes are merged to `main`.
|
||||
|
||||
If a feature's size is big and/or its risk is high, it can be implemented in a feature branch.
|
||||
While the feature work is in progress,
|
||||
pull requests are open and squash merged against the feature branch.
|
||||
Branch `main` is periodically merged (merge commit) into the feature branch,
|
||||
to reduce branch divergence.
|
||||
When the feature is complete, the feature branch is merged back (merge commit) into `main`.
|
||||
The moment of the final merge can be carefully chosen
|
||||
so as to land different features in different releases.
|
||||
Pending minor releases have long-lived release candidate ("RC") branches. Minor release changes should be merged to these long-lived RC branches at the same time that the changes are merged to master.
|
||||
|
||||
Note all pull requests should be squash merged except for merging to a release branch (named `vX.Y`). This keeps the commit history clean and makes it
|
||||
easy to reference the pull request where a change was introduced.
|
||||
|
||||
### Development Procedure
|
||||
|
||||
The latest state of development is on `main`, which must never fail `make test`. _Never_ force push `main`, unless fixing broken git history (which we rarely do anyways).
|
||||
The latest state of development is on `master`, which must never fail `make test`. _Never_ force push `master`, unless fixing broken git history (which we rarely do anyways).
|
||||
|
||||
To begin contributing, create a development branch either on `github.com/tendermint/tendermint`, or your fork (using `git remote add origin`).
|
||||
|
||||
Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `main`. (Since pull requests are squash-merged, either is fine!)
|
||||
Make changes, and before submitting a pull request, update the `CHANGELOG_PENDING.md` to record your change. Also, run either `git rebase` or `git merge` on top of the latest `master`. (Since pull requests are squash-merged, either is fine!)
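A minimal sketch of that update step, assuming your feature branch is called `my-feature` and the upstream remote is `origin` (both names are illustrative):

```sh
git fetch origin
git checkout my-feature
git merge origin/main    # or: git rebase origin/main
```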
|
||||
|
||||
Update the `UPGRADING.md` if the change you've made is breaking and the
|
||||
instructions should be in place for users on how they can upgrade their
|
||||
instructions should be in place for a user on how he/she can upgrade it's
|
||||
software (ABCI application, Tendermint-based blockchain, light client, wallet).
|
||||
|
||||
Once you have submitted a pull request label the pull request with either `R:minor`, if the change should be included in the next minor release, or `R:major`, if the change is meant for a major release.
|
||||
|
||||
Sometimes (often!) pull requests get out-of-date with `main`, as other people merge different pull requests to `main`. It is our convention that pull request authors are responsible for updating their branches with `main`. (This also means that you shouldn't update someone else's branch for them; even if it seems like you're doing them a favor, you may be interfering with their git flow in some way!)
|
||||
Sometimes (often!) pull requests get out-of-date with master, as other people merge different pull requests to master. It is our convention that pull request authors are responsible for updating their branches with master. (This also means that you shouldn't update someone else's branch for them; even if it seems like you're doing them a favor, you may be interfering with their git flow in some way!)
|
||||
|
||||
#### Merging Pull Requests
|
||||
|
||||
@@ -260,20 +193,20 @@ It is also our convention that authors merge their own pull requests, when possi
|
||||
|
||||
Before merging a pull request:
|
||||
|
||||
- Ensure pull branch is up-to-date with a recent `main` (GitHub won't let you merge without this!)
|
||||
- Ensure pull branch is up-to-date with a recent `master` (GitHub won't let you merge without this!)
|
||||
- Run `make test` to ensure that all tests pass
|
||||
- [Squash](https://stackoverflow.com/questions/5189560/squash-my-last-x-commits-together-using-git) merge pull request
|
||||
|
||||
#### Pull Requests for Minor Releases
|
||||
|
||||
If your change should be included in a minor release, please also open a PR against the long-lived minor release candidate branch (e.g., `rc1/v0.33.5`) _immediately after your change has been merged to main_.
|
||||
If your change should be included in a minor release, please also open a PR against the long-lived minor release candidate branch (e.g., `rc1/v0.33.5`) _immediately after your change has been merged to master_.
|
||||
|
||||
You can do this by cherry-picking your commit off `main`:
|
||||
You can do this by cherry-picking your commit off master:
|
||||
|
||||
```sh
|
||||
$ git checkout rc1/v0.33.5
|
||||
$ git checkout -b {new branch name}
|
||||
$ git cherry-pick {commit SHA from main}
|
||||
$ git cherry-pick {commit SHA from master}
|
||||
# may need to fix conflicts, and then use git add and git cherry-pick --continue
|
||||
$ git push origin {new branch name}
|
||||
```
|
||||
@@ -292,7 +225,117 @@ cmd/debug: execute p.Signal only when p is not nil
|
||||
Fixes #nnnn
|
||||
```
|
||||
|
||||
Each PR should have one commit once it lands on `main`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
|
||||
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
|
||||
|
||||
### Release Procedure
|
||||
|
||||
#### Major Release
|
||||
|
||||
This major release process assumes that this release was preceded by release candidates.
|
||||
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
|
||||
|
||||
1. Start on the latest RC branch (`RCx/vX.X.0`).
|
||||
2. Run integration tests.
|
||||
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
|
||||
- "Squash" changes from the changelog entries for the RCs into a single entry,
|
||||
and add all changes included in `CHANGELOG_PENDING.md`.
|
||||
(Squashing means both combining all entries and removing or simplifying
|
||||
any intra-RC changes. It may also help to alphabetize the entries by package name.)
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
|
||||
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
|
||||
- `git checkout RCx/vX.X.0`
|
||||
- `git checkout -b release/vX.X.0`
|
||||
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
|
||||
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
|
||||
- `git push origin vX.X.0`
|
||||
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
||||
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
|
||||
new major release series.
|
||||
|
||||
##### Major Release (from `master`)
|
||||
|
||||
1. Start on `master`
|
||||
2. Run integration tests (see `test_integrations` in Makefile)
|
||||
3. Prepare release in a pull request against `master` (to be squash merged):
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
|
||||
had release candidates, squash all the RC updates into one
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
|
||||
release, and add the github aliases of external contributors to the top of
|
||||
the changelog. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Make sure all significant breaking changes are covered in `UPGRADING.md`
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
5. Update the `CHANGELOG.md` file on master with the release's changelog.
|
||||
6. Delete any RC branches and tags for this release (if applicable)
|
||||
|
||||
#### Minor Release (Point Releases)
|
||||
|
||||
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
|
||||
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
|
||||
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
|
||||
|
||||
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
|
||||
|
||||
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. Depending on which backport branch you need to backport to there will be labels for them. To notify the bot to backport a pull request, mark the pull request with the label `backport-to-<backport_branch>`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request.
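If the bot fails and you need to resolve the backport by hand, the fallback is an ordinary cherry-pick onto the backport branch. A minimal sketch, assuming the backport branch is `v0.34.x` and using an illustrative working-branch name:

```sh
# Start a working branch off the up-to-date backport branch
git fetch origin v0.34.x
git checkout -b backport/my-fix origin/v0.34.x

# Cherry-pick the squashed commit; -x records the original SHA in the message
git cherry-pick -x <commit SHA from master>

# If there are conflicts: fix them, `git add` the files, then continue
git cherry-pick --continue

git push origin backport/my-fix
```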
|
||||
|
||||
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
|
||||
|
||||
To create a minor release:
|
||||
|
||||
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
|
||||
2. Run integration tests: `make test_integrations`
|
||||
3. Check out a new branch and prepare the release:
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump the ABCI version number, if necessary.
|
||||
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
||||
which can change during minor releases, and only field additions are valid minor changes.)
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes that will land them back on `vX.X.x`
|
||||
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
|
||||
- Remove all `R:minor` labels from the pull requests that were included in the release.
|
||||
- Do not merge the backport branch into master.
|
||||
|
||||
#### Release Candidates
|
||||
|
||||
Before creating an official release, especially a major release, we may want to create a
|
||||
release candidate (RC) for our friends and partners to test out. We use git tags to
|
||||
create RCs, and we build them off of RC branches. RC branches typically have names formatted
|
||||
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
|
||||
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
|
||||
|
||||
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
|
||||
have distinct names from the tags/release names.)
|
||||
|
||||
1. Start from the RC branch (e.g. `RC0/v0.34.0`).
|
||||
2. Create the new tag, specifying a name and a tag "message":
|
||||
`git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
|
||||
3. Push the tag back up to origin:
|
||||
`git push origin v0.34.0-rc0`
|
||||
Now the tag should be available on the repo's releases page.
|
||||
4. Create a new release candidate branch for any possible updates to the RC:
|
||||
`git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
|
||||
|
||||
## Testing
|
||||
|
||||
|
||||
@@ -1,18 +1,14 @@
|
||||
# Use a build arg to ensure that both stages use the same,
|
||||
# hopefully current, go version.
|
||||
ARG GOLANG_BASE_IMAGE=golang:1.18-alpine
|
||||
|
||||
# stage 1 Generate Tendermint Binary
|
||||
FROM --platform=$BUILDPLATFORM $GOLANG_BASE_IMAGE as builder
|
||||
FROM golang:1.16-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make
|
||||
COPY / /tendermint
|
||||
WORKDIR /tendermint
|
||||
RUN TARGETPLATFORM=$TARGETPLATFORM make build-linux
|
||||
RUN make build-linux
|
||||
|
||||
# stage 2
|
||||
FROM $GOLANG_BASE_IMAGE
|
||||
FROM golang:1.15-alpine
|
||||
LABEL maintainer="hello@tendermint.com"
|
||||
|
||||
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
|
||||
@@ -53,7 +49,7 @@ ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain
|
||||
COPY ./DOCKER/docker-entrypoint.sh /usr/local/bin/
|
||||
|
||||
ENTRYPOINT ["docker-entrypoint.sh"]
|
||||
CMD ["node"]
|
||||
CMD ["start"]
|
||||
|
||||
# Expose the data directory as a volume since there's mutable state in there
|
||||
VOLUME [ "$TMHOME" ]
|
||||
|
||||
@@ -9,7 +9,7 @@ RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm &
|
||||
RUN yum -y groupinstall "Development Tools"
|
||||
RUN yum -y install leveldb-devel which
|
||||
|
||||
ENV GOVERSION=1.12.9
|
||||
ENV GOVERSION=1.16.5
|
||||
|
||||
RUN cd /tmp && \
|
||||
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
|
||||
|
||||
@@ -6,9 +6,9 @@ DockerHub tags for official releases are [here](https://hub.docker.com/r/tenderm
|
||||
|
||||
Official releases can be found [here](https://github.com/tendermint/tendermint/releases).
|
||||
|
||||
The Dockerfile for Tendermint is not expected to change in the near future. The main file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile).
|
||||
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
|
||||
|
||||
Respective versioned files can be found at `https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile` (replace the Xs with the version number).
|
||||
Respective versioned files can be found <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).
|
||||
|
||||
## Quick reference
|
||||
|
||||
@@ -20,9 +20,9 @@ Respective versioned files can be found at `https://raw.githubusercontent.com/te
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.
|
||||
|
||||
For more background, see the [the docs](https://docs.tendermint.com/main/introduction/#quick-start).
|
||||
For more background, see the [the docs](https://docs.tendermint.com/master/introduction/#quick-start).
|
||||
|
||||
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/main/introduction/quick-start.html).
|
||||
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/master/introduction/quick-start.html).
|
||||
|
||||
## How to use this image
|
||||
|
||||
@@ -31,13 +31,13 @@ To get started developing applications, see the [application developers guide](h
|
||||
A quick example of a built-in app and Tendermint core in one container.
|
||||
|
||||
```sh
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init validator
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint start --proxy-app=kvstore
|
||||
```
|
||||
|
||||
## Local cluster
|
||||
|
||||
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/main/Makefile) and run:
|
||||
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:
|
||||
|
||||
```sh
|
||||
make build-linux
|
||||
@@ -49,8 +49,8 @@ Note that this will build and use a different image than the ones provided here.
|
||||
|
||||
## License
|
||||
|
||||
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/main/LICENSE).
|
||||
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/main/CONTRIBUTING.md) for more information.
|
||||
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
|
||||
|
||||
@@ -3,14 +3,14 @@ set -e
|
||||
|
||||
if [ ! -d "$TMHOME/config" ]; then
|
||||
echo "Running tendermint init to create (default) configuration for docker run."
|
||||
tendermint init
|
||||
tendermint init validator
|
||||
|
||||
sed -i \
|
||||
-e "s/^proxy_app\s*=.*/proxy_app = \"$PROXY_APP\"/" \
|
||||
-e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \
|
||||
-e "s/^moniker\s*=.*/moniker = \"$MONIKER\"/" \
|
||||
-e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \
|
||||
-e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \
|
||||
-e 's/^index_all_tags\s*=.*/index_all_tags = true/' \
|
||||
-e 's/^addr-book-strict\s*=.*/addr-book-strict = false/' \
|
||||
-e 's/^timeout-commit\s*=.*/timeout-commit = "500ms"/' \
|
||||
-e 's/^index-all-tags\s*=.*/index-all-tags = true/' \
|
||||
-e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \
|
||||
-e 's/^prometheus\s*=.*/prometheus = true/' \
|
||||
"$TMHOME/config/config.toml"
|
||||
|
||||
6
LICENSE
@@ -1,3 +1,5 @@
|
||||
Tendermint Core
|
||||
License: Apache2.0
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
@@ -179,7 +181,7 @@
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
@@ -187,7 +189,7 @@
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
Copyright 2016 All in Bits, Inc
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
||||
260
Makefile
@@ -1,13 +1,21 @@
|
||||
#!/usr/bin/make -f
|
||||
|
||||
PACKAGES=$(shell go list ./...)
|
||||
BUILDDIR?=$(CURDIR)/build
|
||||
OUTPUT?=$(BUILDDIR)/tendermint
|
||||
BUILDDIR ?= $(CURDIR)/build
|
||||
|
||||
BUILD_TAGS?=tendermint
|
||||
|
||||
COMMIT_HASH := $(shell git rev-parse --short HEAD)
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMGitCommitHash=$(COMMIT_HASH)
|
||||
# If building a release, please checkout the version tag to get the correct version setting
|
||||
ifneq ($(shell git symbolic-ref -q --short HEAD),)
|
||||
VERSION := unreleased-$(shell git symbolic-ref -q --short HEAD)-$(shell git rev-parse HEAD)
|
||||
else
|
||||
VERSION := $(shell git describe)
|
||||
endif
|
||||
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
|
||||
CGO_ENABLED ?= 0
|
||||
|
||||
# handle nostrip
|
||||
@@ -47,152 +55,54 @@ endif
|
||||
# allow users to pass additional flags via the conventional LDFLAGS variable
|
||||
LD_FLAGS += $(LDFLAGS)
|
||||
|
||||
# Process the Docker environment variable TARGETPLATFORM
# in order to build the binary with the corresponding ARCH.
# By default the build always targets linux/amd64.
|
||||
TARGETPLATFORM ?=
|
||||
GOOS ?= linux
|
||||
GOARCH ?= amd64
|
||||
GOARM ?=
|
||||
|
||||
ifeq (linux/arm,$(findstring linux/arm,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=arm
|
||||
GOARM=7
|
||||
endif
|
||||
|
||||
ifeq (linux/arm/v6,$(findstring linux/arm/v6,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=arm
|
||||
GOARM=6
|
||||
endif
|
||||
|
||||
ifeq (linux/arm64,$(findstring linux/arm64,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=arm64
|
||||
GOARM=7
|
||||
endif
|
||||
|
||||
ifeq (linux/386,$(findstring linux/386,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=386
|
||||
endif
|
||||
|
||||
ifeq (linux/amd64,$(findstring linux/amd64,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=amd64
|
||||
endif
|
||||
|
||||
ifeq (linux/mips,$(findstring linux/mips,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=mips
|
||||
endif
|
||||
|
||||
ifeq (linux/mipsle,$(findstring linux/mipsle,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=mipsle
|
||||
endif
|
||||
|
||||
ifeq (linux/mips64,$(findstring linux/mips64,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=mips64
|
||||
endif
|
||||
|
||||
ifeq (linux/mips64le,$(findstring linux/mips64le,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=mips64le
|
||||
endif
|
||||
|
||||
ifeq (linux/riscv64,$(findstring linux/riscv64,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=riscv64
|
||||
endif
|
||||
|
||||
all: check build test install
|
||||
.PHONY: all
|
||||
|
||||
include tests.mk
|
||||
include test/Makefile
|
||||
|
||||
###############################################################################
|
||||
### Build Tendermint ###
|
||||
### Build Tendermint ###
|
||||
###############################################################################
|
||||
|
||||
build:
|
||||
CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT) ./cmd/tendermint/
|
||||
build: $(BUILDDIR)/
|
||||
CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(BUILDDIR)/ ./cmd/tendermint/
|
||||
.PHONY: build
|
||||
|
||||
install:
|
||||
CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
|
||||
.PHONY: install
|
||||
|
||||
###############################################################################
|
||||
### Metrics ###
|
||||
###############################################################################
|
||||
|
||||
metrics: testdata-metrics
|
||||
go generate -run="scripts/metricsgen" ./...
|
||||
.PHONY: metrics
|
||||
|
||||
# By convention, the go tool ignores subdirectories of directories named
|
||||
# 'testdata'. This command invokes the generate command on the folder directly
|
||||
# to avoid this.
|
||||
testdata-metrics:
|
||||
ls ./scripts/metricsgen/testdata | xargs -I{} go generate -v -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{}
|
||||
.PHONY: testdata-metrics
|
||||
|
||||
###############################################################################
|
||||
### Mocks ###
|
||||
###############################################################################
|
||||
|
||||
mockery:
|
||||
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||
.PHONY: mockery
|
||||
$(BUILDDIR)/:
|
||||
mkdir -p $@
|
||||
|
||||
###############################################################################
|
||||
### Protobuf ###
|
||||
###############################################################################
|
||||
|
||||
check-proto-deps:
|
||||
ifeq (,$(shell which protoc-gen-gogofaster))
|
||||
@go install github.com/cosmos/gogoproto/protoc-gen-gogofaster@latest
|
||||
endif
|
||||
.PHONY: check-proto-deps
|
||||
proto-all: proto-gen proto-lint proto-check-breaking
|
||||
.PHONY: proto-all
|
||||
|
||||
check-proto-format-deps:
|
||||
ifeq (,$(shell which clang-format))
|
||||
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
|
||||
endif
|
||||
.PHONY: check-proto-format-deps
|
||||
|
||||
proto-gen: check-proto-deps
|
||||
proto-gen:
|
||||
@docker pull -q tendermintdev/docker-build-proto
|
||||
@echo "Generating Protobuf files"
|
||||
@go run github.com/bufbuild/buf/cmd/buf generate
|
||||
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
|
||||
@cp ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc
|
||||
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
|
||||
.PHONY: proto-gen
|
||||
|
||||
# These targets are provided for convenience and are intended for local
|
||||
# execution only.
|
||||
proto-lint: check-proto-deps
|
||||
@echo "Linting Protobuf files"
|
||||
@go run github.com/bufbuild/buf/cmd/buf lint
|
||||
proto-lint:
|
||||
@$(DOCKER_BUF) check lint --error-format=json
|
||||
.PHONY: proto-lint
|
||||
|
||||
proto-format: check-proto-format-deps
|
||||
proto-format:
|
||||
@echo "Formatting Protobuf files"
|
||||
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
|
||||
docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
|
||||
.PHONY: proto-format
|
||||
|
||||
proto-check-breaking: check-proto-deps
|
||||
@echo "Checking for breaking changes in Protobuf files against local branch"
|
||||
@echo "Note: This is only useful if your changes have not yet been committed."
|
||||
@echo " Otherwise read up on buf's \"breaking\" command usage:"
|
||||
@echo " https://docs.buf.build/breaking/usage"
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
|
||||
proto-check-breaking:
|
||||
@$(DOCKER_BUF) check breaking --against-input .git#branch=master
|
||||
.PHONY: proto-check-breaking
|
||||
|
||||
proto-check-breaking-ci:
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x
|
||||
@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
|
||||
.PHONY: proto-check-breaking-ci
|
||||
|
||||
###############################################################################
|
||||
@@ -207,6 +117,27 @@ install_abci:
|
||||
@go install -mod=readonly ./abci/cmd/...
|
||||
.PHONY: install_abci
|
||||
|
||||
###############################################################################
|
||||
### Privval Server ###
|
||||
###############################################################################
|
||||
|
||||
build_privval_server:
|
||||
@go build -mod=readonly -o $(BUILDDIR)/ -i ./cmd/priv_val_server/...
|
||||
.PHONY: build_privval_server
|
||||
|
||||
generate_test_cert:
|
||||
# generate self-signing certificate authority
|
||||
@certstrap init --common-name "root CA" --expires "20 years"
|
||||
# generate server certificate
|
||||
@certstrap request-cert -cn server -ip 127.0.0.1
|
||||
# self-sign server certificate with rootCA
|
||||
@certstrap sign server --CA "root CA"
|
||||
# generate client certificate
|
||||
@certstrap request-cert -cn client -ip 127.0.0.1
|
||||
# self-sign client certificate with rootCA
|
||||
@certstrap sign client --CA "root CA"
|
||||
.PHONY: generate_test_cert
|
||||
|
||||
###############################################################################
|
||||
### Distribution ###
|
||||
###############################################################################
|
||||
@@ -235,7 +166,7 @@ draw_deps:
|
||||
|
||||
get_deps_bin_size:
|
||||
@# Copy of build recipe with additional flags to perform binary size analysis
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(BUILDDIR)/ ./cmd/tendermint/ 2>&1))
|
||||
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
|
||||
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
|
||||
.PHONY: get_deps_bin_size
|
||||
@@ -271,7 +202,7 @@ format:
|
||||
|
||||
lint:
|
||||
@echo "--> Running linter"
|
||||
@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
|
||||
go run github.com/golangci/golangci-lint/cmd/golangci-lint run
|
||||
.PHONY: lint
|
||||
|
||||
DESTINATION = ./index.html.md
|
||||
@@ -279,64 +210,43 @@ DESTINATION = ./index.html.md
|
||||
###############################################################################
|
||||
### Documentation ###
|
||||
###############################################################################
|
||||
|
||||
DOCS_OUTPUT?=/tmp/tendermint-core-docs
|
||||
|
||||
# This builds a docs site for each branch/tag in `./docs/versions` and copies
|
||||
# each site to a version prefixed path. The last entry inside the `versions`
|
||||
# file will be the default root index.html. Only redirects that are built into
|
||||
# the "redirects" folder of each of the branches will be copied out to the root
|
||||
# of the build at the end.
|
||||
# todo remove once tendermint.com DNS is solved
|
||||
build-docs:
|
||||
@cd docs && \
|
||||
while read -r branch path_prefix; do \
|
||||
(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
|
||||
mkdir -p $(DOCS_OUTPUT)/$${path_prefix} ; \
|
||||
cp -r .vuepress/dist/* $(DOCS_OUTPUT)/$${path_prefix}/ ; \
|
||||
cp $(DOCS_OUTPUT)/$${path_prefix}/index.html $(DOCS_OUTPUT) ; \
|
||||
cp $(DOCS_OUTPUT)/$${path_prefix}/404.html $(DOCS_OUTPUT) ; \
|
||||
cp -r $(DOCS_OUTPUT)/$${path_prefix}/redirects/* $(DOCS_OUTPUT) || true ; \
|
||||
(git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
|
||||
mkdir -p ~/output/$${path_prefix} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
|
||||
cp ~/output/$${path_prefix}/index.html ~/output ; \
|
||||
done < versions ;
|
||||
.PHONY: build-docs
|
||||
|
||||
# Build and serve the local version of the docs on the current branch from
|
||||
# http://0.0.0.0:8080
|
||||
serve-docs:
|
||||
@cd docs && \
|
||||
npm ci && \
|
||||
npm run serve
|
||||
.PHONY: serve-docs
|
||||
|
||||
sync-docs:
|
||||
cd ~/output && \
|
||||
echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \
|
||||
echo "CI job = ${CIRCLE_BUILD_URL}" >> version.html ; \
|
||||
aws s3 sync . s3://${WEBSITE_BUCKET} --profile terraform --delete ; \
|
||||
aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ;
|
||||
.PHONY: sync-docs
|
||||
|
||||
# Verify that important design docs have ToC entries.
|
||||
check-docs-toc:
|
||||
@./docs/presubmit.sh
|
||||
.PHONY: check-docs-toc
|
||||
|
||||
###############################################################################
|
||||
### Docker image ###
|
||||
###############################################################################
|
||||
|
||||
build-docker: build-linux
|
||||
cp $(OUTPUT) DOCKER/tendermint
|
||||
cp $(BUILDDIR)/tendermint DOCKER/tendermint
|
||||
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
|
||||
rm -rf DOCKER/tendermint
|
||||
.PHONY: build-docker
|
||||
|
||||
|
||||
###############################################################################
|
||||
### Mocks ###
|
||||
###############################################################################
|
||||
|
||||
mockery:
|
||||
go generate -run="mockery" ./...
|
||||
.PHONY: mockery
|
||||
|
||||
###############################################################################
|
||||
### Local testnet using docker ###
|
||||
###############################################################################
|
||||
|
||||
# Build linux binary on other platforms
|
||||
build-linux:
|
||||
GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) $(MAKE) build
|
||||
GOOS=linux GOARCH=amd64 $(MAKE) build
|
||||
.PHONY: build-linux
|
||||
|
||||
build-docker-localnode:
|
||||
@@ -380,24 +290,16 @@ contract-tests:
|
||||
dredd
|
||||
.PHONY: contract-tests
|
||||
|
||||
# Implements test splitting and running. This is pulled directly from
|
||||
# the github action workflows for better local reproducibility.
|
||||
clean:
|
||||
rm -rf $(CURDIR)/artifacts/ $(BUILDDIR)/
|
||||
|
||||
GO_TEST_FILES != find $(CURDIR) -name "*_test.go"
|
||||
|
||||
# default to four test splits
|
||||
NUM_SPLIT ?= 4
|
||||
|
||||
$(BUILDDIR):
|
||||
mkdir -p $@
|
||||
|
||||
# The format statement filters out all packages that don't have tests.
|
||||
# Note we need to check for both in-package tests (.TestGoFiles) and
|
||||
# out-of-package tests (.XTestGoFiles).
|
||||
$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
|
||||
go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
|
||||
|
||||
split-test-packages:$(BUILDDIR)/packages.txt
|
||||
split -d -n l/$(NUM_SPLIT) $< $<.
|
||||
test-group-%:split-test-packages
|
||||
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=15m -race -coverprofile=$(BUILDDIR)/$*.profile.out
|
||||
build-reproducible:
|
||||
docker rm latest-build || true
|
||||
docker run --volume=$(CURDIR):/sources:ro \
|
||||
--env TARGET_PLATFORMS='linux/amd64 linux/arm64 darwin/amd64 windows/amd64' \
|
||||
--env APP=tendermint \
|
||||
--env COMMIT=$(shell git rev-parse --short=8 HEAD) \
|
||||
--env VERSION=$(shell git describe --tags) \
|
||||
--name latest-build cosmossdk/rbuilder:latest
|
||||
docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
|
||||
.PHONY: build-reproducible
|
||||
|
||||
158
PHILOSOPHY.md
@@ -1,158 +0,0 @@
|
||||
# Design goals
|
||||
|
||||
The design goals for Tendermint (and the SDK and related libraries) are:
|
||||
|
||||
* Simplicity and Legibility
|
||||
* Parallel performance, namely ability to utilize multicore architecture
|
||||
* Ability to evolve the codebase bug-free
|
||||
* Debuggability
|
||||
* Complete correctness that considers all edge cases, esp in concurrency
|
||||
* Future-proof modular architecture, message protocol, APIs, and encapsulation
|
||||
|
||||
|
||||
## Justification
|
||||
|
||||
Legibility is key to maintaining bug-free software as it evolves toward more
|
||||
optimizations, more ease of debugging, and additional features.
|
||||
|
||||
It is too easy to introduce bugs over time by replacing lines of code with
|
||||
those that may panic, which means ideally locks are unlocked by defer
|
||||
statements.
|
||||
|
||||
For example,
|
||||
|
||||
```go
|
||||
func (obj *MyObj) something() {
|
||||
mtx.Lock()
|
||||
obj.something = other
|
||||
mtx.Unlock()
|
||||
}
|
||||
```
|
||||
|
||||
It is too easy to refactor the codebase in the future to replace `other` with
|
||||
`other.String()` for example, and this may introduce a bug that causes a
|
||||
deadlock. So as much as reasonably possible, we need to be using defer
|
||||
statements, even though it introduces additional overhead.
|
||||
|
||||
If it is necessary to optimize the unlocking of mutex locks, the solution is
|
||||
more modularity via smaller functions, so that defer'd unlocks are scoped
|
||||
within a smaller function.
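As a minimal sketch of that pattern (the types and names here are illustrative, not taken from the Tendermint codebase), the critical section lives in its own small method so the deferred unlock always runs:

```go
package example

import "sync"

// Other is a placeholder for the value being stored.
type Other struct{}

// MyObj guards its state with a struct-level mutex.
type MyObj struct {
	mtx   sync.Mutex
	other Other
}

// setOther keeps the critical section inside a small function, so the
// deferred unlock still runs even if future edits add code that can panic.
func (obj *MyObj) setOther(o Other) {
	obj.mtx.Lock()
	defer obj.mtx.Unlock()
	obj.other = o
}
```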
|
||||
|
||||
Similarly, idiomatic for-loops should always be preferred over those that use
|
||||
custom counters, because it is too easy to evolve the body of a for-loop to
|
||||
become more complicated over time, and it becomes more and more difficult to
|
||||
assess the correctness of such a for-loop by visual inspection.
|
||||
|
||||
|
||||
## On performance
|
||||
|
||||
It doesn't matter whether there are alternative implementations that are 2x or
|
||||
3x more performant, when the software doesn't work, deadlocks, or if bugs
|
||||
cannot be debugged. By taking advantage of multicore concurrency, the
|
||||
Tendermint implementation will at least be an order of magnitude within the
|
||||
range of what is theoretically possible. The design philosophy of Tendermint,
|
||||
and the choice of Go as implementation language, is designed to make Tendermint
|
||||
implementation the standard specification for concurrent BFT software.
|
||||
|
||||
By focusing on the message protocols (e.g. ABCI, p2p messages), and
|
||||
encapsulation e.g. IAVL module, (relatively) independent reactors, we are both
|
||||
implementing a standard implementation to be used as the specification for
|
||||
future implementations in more optimizable languages like Rust, Java, and C++;
|
||||
as well as creating sufficiently performant software. Tendermint Core will
|
||||
never be as fast as future implementations of the Tendermint Spec, because Go
|
||||
isn't designed to be as fast as possible. The advantage of using Go is that we
|
||||
can develop the whole stack of modular components **faster** than in other
|
||||
languages.
|
||||
|
||||
Furthermore, the real bottleneck is in the application layer, and it isn't
|
||||
necessary to support more than a sufficiently decentralized set of validators
|
||||
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).
|
||||
|
||||
Instead of optimizing Tendermint performance down to the metal, let's focus on
|
||||
optimizing on other matters, namely ability to push feature complete software
|
||||
that works well enough, can be debugged and maintained, and can serve as a spec
|
||||
for future implementations.
|
||||
|
||||
|
||||
## On encapsulation
|
||||
|
||||
In order to create maintainable, forward-optimizable software, it is critical
|
||||
to develop well-encapsulated objects that have well understood properties, and
|
||||
to re-use these easy-to-use-correctly components as building blocks for further
|
||||
encapsulated meta-objects.
|
||||
|
||||
For example, mutexes are cheap enough for Tendermint's design goals when there
|
||||
isn't goroutine contention, so it is encouraged to create concurrency safe
|
||||
structures with struct-level mutexes. If they are used in the context of
|
||||
non-concurrent logic, then the performance is good enough. If they are used in
|
||||
the context of concurrent logic, then it will still perform correctly.
|
||||
|
||||
Examples of this design principle can be seen in the types.ValidatorSet struct,
|
||||
and the rand.Rand struct. It's one single struct declaration that can be used
|
||||
in both concurrent and non-concurrent logic, and because it is well encapsulated,
|
||||
it's easy to get the usage of the mutex right.
|
||||
|
||||
### example: rand.Rand
|
||||
|
||||
`The default Source is safe for concurrent use by multiple goroutines, but
|
||||
Sources created by NewSource are not`. The reason why the default
|
||||
package-level source is safe for concurrent use is because it is protected (see
|
||||
`lockedSource` in <https://golang.org/src/math/rand/rand.go>).
|
||||
|
||||
But we shouldn't rely on the global source, we should be creating our own
|
||||
Rand/Source instances and using them, especially for determinism in testing.
|
||||
So it is reasonable to have rand.Rand be protected by a mutex. Whether we want
|
||||
our own implementation of Rand is another question, but the answer there is
|
||||
also in the affirmative. Sometimes you want to know where Rand is being used
|
||||
in your code, so it becomes a simple matter of dropping in a log statement to
|
||||
inject inspectability into Rand usage. Also, it is nice to be able to extend
|
||||
the functionality of Rand with custom methods. For these reasons, and for the
|
||||
reasons outlined in this design philosophy document, we should
|
||||
continue to use the rand.Rand object, with mutex protection.
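A minimal standard-library sketch of the "own seeded source" idea (this shows only the stdlib pattern, not Tendermint's own rand wrapper; the function name is illustrative):

```go
package example

import "math/rand"

// newDeterministicRand returns a private *rand.Rand seeded with a fixed
// value, so tests produce reproducible sequences. Unlike the package-level
// functions, a *rand.Rand from rand.New is not safe for concurrent use; a
// shared instance must be guarded by a mutex, which is exactly the
// protection discussed above.
func newDeterministicRand(seed int64) *rand.Rand {
	return rand.New(rand.NewSource(seed))
}
```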
|
||||
|
||||
Another key aspect of good encapsulation is the choice of exposed vs unexposed
|
||||
methods. It should be clear to the reader of the code, which methods are
|
||||
intended to be used in what context, and what safe usage is. Part of this is
|
||||
solved by hiding methods via unexported methods. Another part of this is
|
||||
naming conventions on the methods (e.g. underscores) with good documentation,
|
||||
and code organization. If there are too many exposed methods and it isn't
|
||||
clear what methods have what side effects, then there is something wrong about
|
||||
the design of abstractions that should be revisited.
|
||||
|
||||
|
||||
## On concurrency
|
||||
|
||||
In order for Tendermint to remain relevant in the years to come, it is vital
|
||||
for Tendermint to take advantage of multicore architectures. Due to the nature
|
||||
of the problem, namely consensus across a concurrent p2p gossip network, and to
|
||||
handle RPC requests for a large number of consuming subscribers, it is
|
||||
unavoidable for Tendermint development to require expertise in concurrency
|
||||
design, especially when it comes to the reactor design, and also for RPC
|
||||
request handling.
|
||||
|
||||
|
||||
# Guidelines
|
||||
|
||||
Here are some guidelines for designing for (sufficient) performance and concurrency:
|
||||
|
||||
* Mutex locks are cheap enough when there isn't contention.
|
||||
* Do not optimize code without analytical or observed proof that it is in a hot path.
|
||||
* Don't over-use channels when mutex locks w/ encapsulation are sufficient.
|
||||
* The need to drain channels is often a hint of unconsidered edge cases.
|
||||
* The creation of O(N) one-off goroutines is generally technical debt that
|
||||
needs to be addressed sooner rather than later. Avoid creating too many
|
||||
goroutines as a patch around incomplete concurrency design, or at least be
|
||||
aware of the debt and do not invest in the debt. On the other hand, Tendermint
|
||||
is designed to have a limited number of peers (e.g. 10 or 20), so the creation
|
||||
of O(C) goroutines per O(P) peers is still O(C\*P=constant).
|
||||
* Use defer statements to unlock as much as possible. If you want to unlock sooner,
|
||||
try to create more modular functions that do make use of defer statements.
|
||||
|
||||
# Mantras
|
||||
|
||||
* Premature optimization kills
|
||||
* Readability is paramount
|
||||
* Beautiful is better than fast.
|
||||
* In the face of ambiguity, refuse the temptation to guess.
|
||||
* In the face of bugs, refuse the temptation to cover the bug.
|
||||
* There should be one-- and preferably only one --obvious way to do it.
|
||||
213
README.md
@@ -2,143 +2,152 @@
|
||||
|
||||

|
||||
|
||||
[Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
|
||||
[Blockchain], for short.
|
||||
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
|
||||
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
|
||||
|
||||
[![Version][version-badge]][version-url]
|
||||
[![API Reference][api-badge]][api-url]
|
||||
[![Go version][go-badge]][go-url]
|
||||
[![Discord chat][discord-badge]][discord-url]
|
||||
[![License][license-badge]][license-url]
|
||||
[![Sourcegraph][sg-badge]][sg-url]
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://pkg.go.dev/github.com/tendermint/tendermint)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/vcExX9T)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
[](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
|
||||
|
||||
| Branch | Tests | Linting |
|
||||
|--------|------------------------------------|---------------------------------|
|
||||
| main | [![Tests][tests-badge]][tests-url] | [![Lint][lint-badge]][lint-url] |
|
||||
| Branch | Tests | Coverage | Linting |
|
||||
|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
|
||||
| master |  | [](https://codecov.io/gh/tendermint/tendermint) |  |
|
||||
|
||||
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a
|
||||
state transition machine - written in any programming language - and securely
|
||||
replicates it on many machines.
|
||||
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
|
||||
|
||||
For protocol details, refer to the [Tendermint Specification](./spec/README.md).
|
||||
For protocol details, see [the specification](https://github.com/tendermint/spec).
|
||||
|
||||
For detailed analysis of the consensus protocol, including safety and liveness
|
||||
proofs, read our paper, "[The latest gossip on BFT
|
||||
consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the
|
||||
[website](https://docs.tendermint.com/).
|
||||
For detailed analysis of the consensus protocol, including safety and liveness proofs,
|
||||
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Releases
|
||||
|
||||
Please do not depend on `main` as your production branch. Use
|
||||
[releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
|
||||
Tendermint has been used in production in private and public environments, most
notably the blockchains of the Cosmos Network. We haven't released v1.0 yet
|
||||
since we are making breaking changes to the protocol and the APIs. See below for
|
||||
more details about [versioning](#versioning).
|
||||
Tendermint has been in the production of private and public environments, most notably the blockchains of the Cosmos Network. we haven't released v1.0 yet since we are making breaking changes to the protocol and the APIs.
|
||||
See below for more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help.
|
||||
You can contact us [over email](mailto:hello@interchain.io) or [join the
|
||||
chat](https://discord.gg/cosmosnetwork).
|
||||
|
||||
More on how releases are conducted can be found [here](./RELEASES.md).
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help. You can
|
||||
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T).
|
||||
|
||||
## Security
|
||||
|
||||
To report a security vulnerability, see our [bug bounty
|
||||
program](https://hackerone.com/cosmos). For examples of the kinds of bugs we're
|
||||
looking for, see [our security policy](SECURITY.md).
|
||||
program](https://hackerone.com/tendermint).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
|
||||
|
||||
We also maintain a dedicated mailing list for security updates. We will only
|
||||
ever use this mailing list to notify you of vulnerabilities and fixes in
|
||||
Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
|
||||
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
|
||||
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
|
||||
|
||||
## Minimum requirements
|
||||
|
||||
| Requirement | Notes |
|
||||
|-------------|-------------------|
|
||||
| Go version | Go 1.18 or higher |
|
||||
| Requirement | Notes |
|
||||
|-------------|------------------|
|
||||
| Go version | Go1.16 or higher |
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
|
||||
|
||||
### Install
|
||||
|
||||
See the [install instructions](./docs/introduction/install.md).
|
||||
See the [install instructions](/docs/introduction/install.md).
|
||||
|
||||
### Quick Start
|
||||
|
||||
- [Single node](./docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](./docs/networks/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](./docs/networks/terraform-and-ansible.md)
|
||||
- [Single node](/docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
|
||||
- [Join the Cosmos testnet](https://cosmos.network/testnet)
|
||||
|
||||
## Contributing
|
||||
|
||||
Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
|
||||
|
||||
Before contributing to the project, please take a look at the [contributing
|
||||
guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also
|
||||
find it helpful to read the [specifications](./spec/README.md), and familiarize
|
||||
yourself with our [Architectural Decision Records
|
||||
(ADRs)](./docs/architecture/README.md) and
|
||||
[Request For Comments (RFCs)](./docs/rfc/README.md).
|
||||
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
|
||||
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
|
||||
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
|
||||
and familiarize yourself with our
|
||||
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
|
||||
|
||||
## Versioning
|
||||
|
||||
### Semantic Versioning
|
||||
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and
|
||||
how the version changes. According to SemVer, anything in the public API can
|
||||
change at any time before version 1.0.0
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
|
||||
According to SemVer, anything in the public API can change at any time before version 1.0.0
|
||||
|
||||
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR
|
||||
version is used to signal breaking changes across Tendermint's API. This API
|
||||
includes all publicly exposed types, functions, and methods in non-internal Go
|
||||
packages as well as the types and methods accessible via the Tendermint RPC
|
||||
interface.
|
||||
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
|
||||
to signal breaking changes across a subset of the total public API. This subset includes all
|
||||
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
|
||||
include the Go APIs.
|
||||
|
||||
Breaking changes to these public APIs will be documented in the CHANGELOG.
|
||||
That said, breaking changes in the following packages will be documented in the
|
||||
CHANGELOG even if they don't lead to MINOR version bumps:
|
||||
|
||||
- crypto
|
||||
- config
|
||||
- libs
|
||||
- bits
|
||||
- bytes
|
||||
- json
|
||||
- log
|
||||
- math
|
||||
- net
|
||||
- os
|
||||
- protoio
|
||||
- rand
|
||||
- sync
|
||||
- strings
|
||||
- service
|
||||
- node
|
||||
- rpc/client
|
||||
- types
|
||||
|
||||
### Upgrades
|
||||
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0, we do not
|
||||
guarantee that breaking changes (ie. bumps in the MINOR version) will work with
|
||||
existing Tendermint blockchains. In these cases you will have to start a new
|
||||
blockchain, or write something custom to get the old data into the new chain.
|
||||
However, any bump in the PATCH version should be compatible with existing
|
||||
blockchain histories.
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0,
|
||||
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
|
||||
will work with existing Tendermint blockchains. In these cases you will
|
||||
have to start a new blockchain, or write something custom to get the old
|
||||
data into the new chain. However, any bump in the PATCH version should be
|
||||
compatible with existing blockchain histories.
|
||||
|
||||
|
||||
For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
### Supported Versions
|
||||
|
||||
Because we are a small core team, we have limited capacity to ship patch
|
||||
updates, including security updates. Consequently, we strongly recommend keeping
|
||||
Tendermint up-to-date. Upgrading instructions can be found in
|
||||
[UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
Currently supported versions include:
|
||||
|
||||
- v0.34.x
|
||||
- v0.37.x (release candidate)
|
||||
Because we are a small core team, we only ship patch updates, including security updates,
|
||||
to the most recent minor release and the second-most recent minor release. Consequently,
|
||||
we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found
|
||||
in [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
## Resources
|
||||
|
||||
### Libraries
|
||||
### Tendermint Core
|
||||
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building
|
||||
applications in Golang
|
||||
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
|
||||
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
|
||||
For details about the blockchain data structures and the p2p protocols, see the
|
||||
[Tendermint specification](https://docs.tendermint.com/master/spec/).
|
||||
|
||||
For details on using the software, see the [documentation](/docs/) which is also
|
||||
hosted at: <https://docs.tendermint.com/master/>
|
||||
|
||||
### Tools
|
||||
|
||||
Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
|
||||
Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
|
||||
### Applications
|
||||
|
||||
- [Cosmos Hub](https://hub.cosmos.network/)
|
||||
- [Terra](https://www.terra.money/)
|
||||
- [Celestia](https://celestia.org/)
|
||||
- [Anoma](https://anoma.network/)
|
||||
- [Vocdoni](https://docs.vocdoni.io/)
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
|
||||
- [Many more](https://tendermint.com/ecosystem)
|
||||
|
||||
### Research
|
||||
|
||||
@@ -150,31 +159,9 @@ Currently supported versions include:
|
||||
|
||||
## Join us!
|
||||
|
||||
Tendermint Core is maintained by [Interchain GmbH](https://interchain.io).
|
||||
If you'd like to work full-time on Tendermint Core,
|
||||
[we're hiring](https://interchain-gmbh.breezy.hr/)!
|
||||
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
|
||||
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)!
|
||||
|
||||
Funding for Tendermint Core development comes primarily from the
|
||||
[Interchain Foundation](https://interchain.io), a Swiss non-profit. The
|
||||
Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the
|
||||
for-profit entity that also maintains [tendermint.com](https://tendermint.com).
|
||||
|
||||
[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
|
||||
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
|
||||
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
|
||||
[version-badge]: https://img.shields.io/github/v/release/tendermint/tendermint.svg
|
||||
[version-url]: https://github.com/tendermint/tendermint/releases/latest
|
||||
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
|
||||
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
|
||||
[go-badge]: https://img.shields.io/badge/go-1.18-blue.svg
|
||||
[go-url]: https://github.com/moovweb/gvm
|
||||
[discord-badge]: https://img.shields.io/discord/669268347736686612.svg
|
||||
[discord-url]: https://discord.gg/cosmosnetwork
|
||||
[license-badge]: https://img.shields.io/github/license/tendermint/tendermint.svg
|
||||
[license-url]: https://github.com/tendermint/tendermint/blob/main/LICENSE
|
||||
[sg-badge]: https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg
|
||||
[sg-url]: https://sourcegraph.com/github.com/tendermint/tendermint?badge
|
||||
[tests-url]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml
|
||||
[tests-badge]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml/badge.svg?branch=main
|
||||
[lint-badge]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml/badge.svg
|
||||
[lint-url]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml
|
||||
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
|
||||
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
|
||||
that also maintains [tendermint.com](https://tendermint.com).
|
||||
|
||||
371
RELEASES.md
@@ -1,371 +0,0 @@
|
||||
# Releases
|
||||
|
||||
Tendermint uses modified [semantic versioning](https://semver.org/) with each
|
||||
release following a `vX.Y.Z` format. Tendermint is currently on major version 0
|
||||
and uses the minor version to signal breaking changes. The `main` branch is
|
||||
used for active development and thus it is not advisable to build against it.
|
||||
|
||||
The latest changes are always initially merged into `main`. Releases are
|
||||
specified using tags and are built from long-lived "backport" branches that are
|
||||
cut from `main` when the release process begins. Each release "line" (e.g.
|
||||
0.34 or 0.33) has its own long-lived backport branch, and the backport branches
|
||||
have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder
|
||||
in this case). Tendermint only maintains the last two releases at a time (the
|
||||
oldest release is predominantly just security patches).
|
||||
|
||||
## Backporting
|
||||
|
||||
As non-breaking changes land on `main`, they should also be backported to
|
||||
these backport branches.
|
||||
|
||||
We use Mergify's [backport feature](https://mergify.io/features/backports) to
|
||||
automatically backport to the needed branch. There should be a label for any
|
||||
backport branch that you'll be targeting. To notify the bot to backport a pull
|
||||
request, mark the pull request with the label corresponding to the correct
|
||||
backport branch. For example, to backport to v0.38.x, add the label
|
||||
`S:backport-to-v0.38.x`. Once the original pull request is merged, the bot will
|
||||
try to cherry-pick the pull request to the backport branch. If the bot fails to
|
||||
backport, it will open a pull request. The author of the original pull request
|
||||
is responsible for solving the conflicts and merging the pull request.
|
||||
|
||||
### Creating a backport branch
|
||||
|
||||
If this is the first release candidate for a minor version release, e.g.
|
||||
v0.25.0, you get to have the honor of creating the backport branch!
|
||||
|
||||
Note that, after creating the backport branch, you'll also need to update the
|
||||
tags on `main` so that `go mod` is able to order the branches correctly. You
|
||||
should tag `main` with a "dev" tag that is "greater than" the backport
|
||||
branches tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
|
||||
for more context.
|
||||
|
||||
In the following example, we'll assume that we're making a backport branch for
|
||||
the 0.38.x line.
|
||||
|
||||
1. Start on `main`
|
||||
|
||||
2. Ensure that there is a [branch protection
|
||||
rule](https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/defining-the-mergeability-of-pull-requests/managing-a-branch-protection-rule) for the
|
||||
branch you are about to create (you will need admin access to the repository
|
||||
in order to do this).
|
||||
|
||||
3. Create and push the backport branch:
|
||||
```sh
|
||||
git checkout -b v0.38.x
|
||||
git push origin v0.38.x
|
||||
```
|
||||
|
||||
4. Create a PR to update the documentation directory for the backport branch.
|
||||
|
||||
We rewrite any URLs pointing to `main` to point to the backport branch,
|
||||
so that generated documentation will link to the correct versions of files
|
||||
elsewhere in the repository. The following files are to be excluded from this
|
||||
search:
|
||||
|
||||
* [`README.md`](./README.md)
|
||||
* [`CHANGELOG.md`](./CHANGELOG.md)
|
||||
* [`UPGRADING.md`](./UPGRADING.md)
|
||||
|
||||
The following links are to always point to `main`, regardless of where they
|
||||
occur in the codebase:
|
||||
|
||||
* `https://github.com/tendermint/tendermint/blob/main/LICENSE`
|
||||
|
||||
Be sure to search for all of the following links and replace `main` with your
|
||||
corresponding branch label or version (e.g. `v0.38.x` or `v0.38`):
|
||||
|
||||
* `github.com/tendermint/tendermint/blob/main` ->
|
||||
`github.com/tendermint/tendermint/blob/v0.38.x`
|
||||
* `github.com/tendermint/tendermint/tree/main` ->
|
||||
`github.com/tendermint/tendermint/tree/v0.38.x`
|
||||
* `docs.tendermint.com/main` -> `docs.tendermint.com/v0.38`
|
||||
|
||||
Once you have updated all of the relevant documentation:
|
||||
```sh
|
||||
# Create and push the PR.
|
||||
git checkout -b update-docs-v038x
|
||||
git commit -m "Update docs for v0.38.x backport branch."
|
||||
git push -u origin update-docs-v038x
|
||||
```
|
||||
|
||||
Be sure to merge this PR before making other changes on the newly-created
|
||||
backport branch.
|
||||
|
||||
After doing these steps, go back to `main` and do the following:
|
||||
|
||||
1. Create a new workflow to run e2e nightlies for the new backport branch. (See
|
||||
[e2e-nightly-main.yml][e2e] for an example.)
|
||||
|
||||
2. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
|
||||
backport bot to work on this branch, and add a corresponding `S:backport-to-v0.38.x`
|
||||
[label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered.
|
||||
|
||||
3. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
|
||||
enable automatic update of Go dependencies on this branch. Copy and edit one
|
||||
of the existing branch configurations to set the correct `target-branch`.
|
||||
|
||||
[e2e]: https://github.com/tendermint/tendermint/blob/main/.github/workflows/e2e-nightly-main.yml
|
||||
|
||||
## Pre-releases
|
||||
|
||||
Before creating an official release, especially a minor release, we may want to
|
||||
create an alpha or beta version, or release candidate (RC) for our friends and
|
||||
partners to test out. We use git tags to create pre-releases, and we build them
|
||||
off of backport branches, for example:
|
||||
|
||||
- `v0.38.0-alpha.1` - The first alpha release of `v0.38.0`. Subsequent alpha
|
||||
releases will be numbered `v0.38.0-alpha.2`, `v0.38.0-alpha.3`, etc.
|
||||
|
||||
Alpha releases are to be considered the _most_ unstable of pre-releases, and
|
||||
are most likely not yet properly QA'd. These are made available to allow early
|
||||
adopters to start integrating and testing new functionality before we're done
|
||||
with QA.
|
||||
|
||||
- `v0.38.0-beta.1` - The first beta release of `v0.38.0`. Subsequent beta
|
||||
releases will be numbered `v0.38.0-beta.2`, `v0.38.0-beta.3`, etc.
|
||||
|
||||
Beta releases can be considered more stable than alpha releases in that we
|
||||
will have QA'd them better than alpha releases, but there still may be
|
||||
minor breaking API changes if users have strong demands for such changes.
|
||||
|
||||
- `v0.38.0-rc1` - The first release candidate (RC) of `v0.38.0`. Subsequent RCs
|
||||
will be numbered `v0.38.0-rc2`, `v0.38.0-rc3`, etc.
|
||||
|
||||
RCs are considered more stable than beta releases in that we will have
|
||||
completed our QA on them. APIs will most likely be stable at this point. The
|
||||
difference between an RC and a release is that there may still be small
|
||||
changes (bug fixes, features) that may make their way into the series before
|
||||
cutting a final release.
|
||||
|
||||
(Note that branches and tags _cannot_ have the same names, so it's important
|
||||
that these branches have distinct names from the tags/release names.)
|
||||
|
||||
If this is the first pre-release for a minor release, you'll have to make a new
|
||||
backport branch (see above). Otherwise:
|
||||
|
||||
1. Start from the backport branch (e.g. `v0.38.x`).
|
||||
2. Run the integration tests and the E2E nightlies
|
||||
(which can be triggered from the GitHub UI;
|
||||
e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-37x.yml).
|
||||
3. Prepare the pre-release documentation:
|
||||
- Ensure that all relevant changes are in the `CHANGELOG_PENDING.md` file.
|
||||
This file's contents must only be included in the `CHANGELOG.md` when we
|
||||
cut final releases.
|
||||
- Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
4. Prepare the versioning:
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary.
|
||||
Check the changelog for breaking changes in these components.
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
5. Open a PR with these changes against the backport branch.
|
||||
6. Once these changes have landed on the backport branch, be sure to pull them back down locally.
|
||||
7. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
|
||||
`git tag -a v0.38.0-rc1 -m "Release Candidate v0.38.0-rc1"`
|
||||
8. Push the tag back up to origin:
|
||||
`git push origin v0.38.0-rc1`
|
||||
Now the tag should be available on the repo's releases page.
|
||||
9. Future pre-releases will continue to be built off of this branch.
|
||||
|
||||
## Minor release
|
||||
|
||||
This minor release process assumes that this release was preceded by release
|
||||
candidates. If there were no release candidates, begin by creating a backport
|
||||
branch, as described above.
|
||||
|
||||
Before performing these steps, be sure the
|
||||
[Minor Release Checklist](#minor-release-checklist) has been completed.
|
||||
|
||||
1. Start on the backport branch (e.g. `v0.38.x`)
|
||||
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
|
||||
3. Prepare the release:
|
||||
- "Squash" changes from the changelog entries for the pre-releases into a
|
||||
single entry, and add all changes included in `CHANGELOG_PENDING.md`.
|
||||
(Squashing includes both combining all entries, as well as removing or
|
||||
simplifying any intra-pre-release changes. It may also help to alphabetize
|
||||
the entries by package name.)
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
4. Open a PR with these changes against the backport branch.
|
||||
5. Once these changes are on the backport branch, push a tag with prepared release details.
|
||||
This will trigger the actual release `v0.38.0`.
|
||||
- `git tag -a v0.38.0 -m 'Release v0.38.0'`
|
||||
- `git push origin v0.38.0`
|
||||
6. Make sure that `main` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
||||
7. Add the release to the documentation site generator config (see
|
||||
[DOCS\_README.md](./docs/DOCS_README.md) for more details). In summary:
|
||||
- Start on branch `main`.
|
||||
- Add a new line at the bottom of [`docs/versions`](./docs/versions) to
|
||||
ensure the newest release is the default for the landing page.
|
||||
- Add a new entry to `themeConfig.versions` in
|
||||
[`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
|
||||
release in the dropdown versions menu.
|
||||
- Commit these changes to `main` and backport them into the backport
|
||||
branch for this release.
|
||||
|
||||
## Patch release
|
||||
|
||||
Patch releases are done differently from minor releases: They are built off of
|
||||
long-lived backport branches, rather than from main. As non-breaking changes
|
||||
land on `main`, they should also be backported into these backport branches.
|
||||
|
||||
Patch releases don't have release candidates by default, although any tricky
|
||||
changes may merit a release candidate.
|
||||
|
||||
To create a patch release:
|
||||
|
||||
1. Check out the long-lived backport branch: `git checkout v0.38.x`
|
||||
2. Run integration tests (`make test_integrations`) and the nightlies.
|
||||
3. Check out a new branch and prepare the release:
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump the ABCI version number, if necessary.
|
||||
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
||||
which can change during patch releases, and only field additions are valid patch changes.)
|
||||
4. Open a PR with these changes that will land them back on `v0.38.x`
|
||||
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
||||
- `git tag -a v0.38.1 -m 'Release v0.38.1'`
|
||||
- `git push origin v0.38.1`
|
||||
6. Create a pull request back to main with the CHANGELOG & version changes from the latest release.
|
||||
- Remove all `R:patch` labels from the pull requests that were included in the release.
|
||||
- Do not merge the backport branch into main.
|
||||
|
||||
## Minor Release Checklist
|
||||
|
||||
The following set of steps are performed on all releases that increment the
|
||||
_minor_ version, e.g. v0.25 to v0.26. These steps ensure that Tendermint is well
|
||||
tested, stable, and suitable for adoption by the various diverse projects that
|
||||
rely on Tendermint.
|
||||
|
||||
### Feature Freeze
|
||||
|
||||
Ahead of any minor version release of Tendermint, the software enters 'Feature
|
||||
Freeze' for at least two weeks. A feature freeze means that _no_ new features
|
||||
are added to the code being prepared for release. No code changes should be made
|
||||
to the code being released that do not directly improve pressing issues of code
|
||||
quality. The following must not be merged during a feature freeze:
|
||||
|
||||
* Refactors that are not related to specific bug fixes.
|
||||
* Dependency upgrades.
|
||||
* New test code that does not test a discovered regression.
|
||||
* New features of any kind.
|
||||
* Documentation or spec improvements that are not related to the newly developed
|
||||
code.
|
||||
|
||||
This period directly follows the creation of the [backport
|
||||
branch](#creating-a-backport-branch). The Tendermint team instead directs all
|
||||
attention to ensuring that the existing code is stable and reliable. Broken
|
||||
tests are fixed, flaky tests are remedied, end-to-end test failures are
|
||||
thoroughly diagnosed and all efforts of the team are aimed at improving the
|
||||
quality of the code. During this period, the upgrade harness tests are run
|
||||
repeatedly and a variety of in-house testnets are run to ensure Tendermint
|
||||
functions at the scale it will be used by application developers and node
|
||||
operators.
|
||||
|
||||
### Nightly End-To-End Tests
|
||||
|
||||
The Tendermint team maintains [a set of end-to-end
|
||||
tests](https://github.com/tendermint/tendermint/blob/main/test/e2e/README.md#L1)
|
||||
that run each night on the latest commit of the project and on the code in the
|
||||
tip of each supported backport branch. These tests start a network of
|
||||
containerized Tendermint processes and run automated checks that the network
|
||||
functions as expected in both stable and unstable conditions. During the feature
|
||||
freeze, these tests are run nightly and must pass consistently for a release of
|
||||
Tendermint to be considered stable.
|
||||
|
||||
### Upgrade Harness
|
||||
|
||||
> TODO(williambanfield): Change to past tense and clarify this section once
|
||||
> upgrade harness is complete.
|
||||
|
||||
The Tendermint team is creating an upgrade test harness to exercise the workflow
|
||||
of stopping an instance of Tendermint running one version of the software and
|
||||
starting up the same application running the next version. To support upgrade
|
||||
testing, we will add the ability to terminate the Tendermint process at specific
|
||||
pre-defined points in its execution so that we can verify upgrades work in a
|
||||
representative sample of stop conditions.
|
||||
|
||||
### Large Scale Testnets
|
||||
|
||||
The Tendermint end-to-end tests run a small network (~10s of nodes) to exercise
|
||||
basic consensus interactions. Real world deployments of Tendermint often have
|
||||
over a hundred nodes just in the validator set, with many others acting as full
|
||||
nodes and sentry nodes. To gain more assurance before a release, we will also
|
||||
run larger-scale test networks to shake out emergent behaviors at scale.
|
||||
|
||||
Large-scale test networks are run on a set of virtual machines (VMs). Each VM is
|
||||
equipped with 4 Gigabytes of RAM and 2 CPU cores. The network runs a very simple
|
||||
key-value store application. The application adds artificial delays to different
|
||||
ABCI calls to simulate a slow application. Each testnet is briefly run with no
|
||||
load being generated to collect a baseline performance. Once baseline is
|
||||
captured, a consistent load is applied across the network. This load takes the
|
||||
form of 10% of the running nodes all receiving a consistent stream of two
|
||||
hundred transactions per minute each.
|
||||
|
||||
During each testnet, the following metrics are monitored and collected on each
|
||||
node:
|
||||
|
||||
* Consensus rounds per height
|
||||
* Maximum connected peers, Minimum connected peers, Rate of change of peer connections
|
||||
* Memory resident set size
|
||||
* CPU utilization
|
||||
* Blocks produced per minute
|
||||
* Seconds for each step of consensus (Propose, Prevote, Precommit, Commit)
|
||||
* Latency to receive block proposals
|
||||
|
||||
For these tests we intentionally target low-powered host machines (with low core
|
||||
counts and limited memory) to ensure we observe similar kinds of resource contention
|
||||
and limitation that real-world deployments of Tendermint experience in production.
|
||||
|
||||
#### 200 Node Testnet
|
||||
|
||||
To test the stability and performance of Tendermint in a real world scenario,
|
||||
a 200 node test network is run. The network comprises 5 seed nodes, 100
|
||||
validators and 95 non-validating full nodes. All nodes begin by dialing
|
||||
a subset of the seed nodes to discover peers. The network is run for several
|
||||
days, with metrics being collected continuously. In cases of changes to performance
|
||||
critical systems, testnets of larger sizes should be considered.
|
||||
|
||||
#### Rotating Node Testnet
|
||||
|
||||
Real-world deployments of Tendermint frequently see new nodes arrive and old
|
||||
nodes exit the network. The rotating node testnet ensures that Tendermint is
|
||||
able to handle this reliably. In this test, a network with 10 validators and
|
||||
3 seed nodes is started. A rolling set of 25 full nodes is started, and each
connects to the network by dialing one of the seed nodes. Once a node is able
to block sync to the head of the chain and begins producing blocks using
Tendermint consensus, it is stopped. Once stopped, a new node is started and
|
||||
takes its place. This network is run for several days.
|
||||
|
||||
#### Network Partition Testnet
|
||||
|
||||
Tendermint is expected to recover from network partitions. A partition where no
|
||||
subset of the nodes is left with the super-majority of the stake is expected to
|
||||
stop making blocks. Upon alleviation of the partition, the network is expected
|
||||
to once again become fully connected and capable of producing blocks. The
|
||||
network partition testnet ensures that Tendermint is able to handle this
|
||||
reliably at scale. In this test, a network with 100 validators and 95 full
|
||||
nodes is started. All validators have equal stake. Once the network is
|
||||
producing blocks, a set of firewall rules is deployed to create a partitioned
|
||||
network with 50% of the stake on one side and 50% on the other. Once the
|
||||
network stops producing blocks, the firewall rules are removed and the nodes
|
||||
are monitored to ensure they reconnect and that the network again begins
|
||||
producing blocks.
|
||||
|
||||
#### Absent Stake Testnet
|
||||
|
||||
Tendermint networks often run with _some_ portion of the voting power offline.
|
||||
The absent stake testnet ensures that large networks are able to handle this
|
||||
reliably. A set of 150 validator nodes and three seed nodes is started. The set
|
||||
of 150 validators is configured to only possess a cumulative stake of 67% of
|
||||
the total stake. The remaining 33% of the stake is configured to belong to
|
||||
a validator that is never actually run in the test network. The network is run
|
||||
for multiple days, ensuring that it is able to produce blocks without issue.
|
||||
113
SECURITY.md
@@ -2,83 +2,51 @@
|
||||
|
||||
## Reporting a Bug
|
||||
|
||||
As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security),
|
||||
we operate a [bug bounty][hackerone]. See the policy for more
|
||||
details on submissions and rewards, and see "Example Vulnerabilities" (below)
|
||||
for examples of the kinds of bugs we're most interested in.
|
||||
As part of our [Coordinated Vulnerability Disclosure
|
||||
Policy](https://tendermint.com/security), we operate a [bug
|
||||
bounty](https://hackerone.com/tendermint).
|
||||
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.
|
||||
|
||||
### Guidelines
|
||||
|
||||
We require that all researchers:
|
||||
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting
|
||||
vulnerability information in public places, including Github Issues, Discord
|
||||
channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience,
|
||||
disruption to production systems (including but not limited to the Cosmos
|
||||
Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential
|
||||
between yourself and the Tendermint Core engineering team until the issue has
|
||||
been resolved and disclosed
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
|
||||
* Avoid posting personally identifiable information, privately or publicly
|
||||
|
||||
If you follow these guidelines when reporting an issue to us, we commit to:
|
||||
|
||||
* Not pursue or support any legal action related to your research on this
|
||||
vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a
|
||||
timely fashion
|
||||
* Not pursue or support any legal action related to your research on this vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
|
||||
|
||||
## Disclosure Process
|
||||
|
||||
Tendermint Core uses the following disclosure process:
|
||||
|
||||
1. Once a security report is received, the Tendermint Core team works to verify
|
||||
the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the
|
||||
vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private
|
||||
repositories. See “Supported Releases” below for more information on which
|
||||
releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE
|
||||
Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time
|
||||
to prepare their systems for the update. Notifications can include forum
|
||||
posts, tweets, and emails to partners and validators, including emails sent
|
||||
to the [Tendermint Security Mailing List][tmsec-mailing].
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new
|
||||
releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these
|
||||
releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we
|
||||
notify the community, again, through the same channels as above. We also
|
||||
publish a Security Advisory on Github and publish the CVE, as long as neither
|
||||
the Security Advisory nor the CVE include any information on how to exploit
|
||||
these vulnerabilities beyond what information is already available in the
|
||||
patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to
|
||||
submitters.
|
||||
10. One week after the releases go out, we will publish a post with further
|
||||
details on the vulnerability as well as our response to it.
|
||||
1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
|
||||
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.
|
||||
|
||||
This process can take some time. Every effort will be made to handle the bug in
|
||||
as timely a manner as possible, however it's important that we follow the
|
||||
process described above to ensure that disclosures are handled consistently and
|
||||
to keep Tendermint Core and its downstream dependent projects--including but not
|
||||
limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
|
||||
### Example Timeline
|
||||
|
||||
The following is an example timeline for the triage and response. The required
|
||||
roles and team members are described in parentheses after each task; however,
|
||||
multiple people can play each role and each person may play multiple roles.
|
||||
The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.
|
||||
|
||||
#### 24+ Hours Before Release Time
|
||||
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Create patches in a private security repo, and ensure that PRs are open
|
||||
targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
3. Create patches in a private security repo, and ensure that PRs are open targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
|
||||
5. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
|
||||
@@ -86,23 +54,19 @@ multiple people can play each role and each person may play multiple roles.
|
||||
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels
|
||||
(Telegram, Discord) (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
|
||||
#### Release Time
|
||||
|
||||
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT
|
||||
LEAD)
|
||||
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
2. Cut Cosmos SDK release for eligible versions (COSMOS ENG)
|
||||
3. Cut Gaia release for eligible versions (GAIA ENG)
|
||||
4. Post “Security releases” on forum (TENDERMINT LEAD)
|
||||
5. Post new Tweet linking to forum post (COMMS LEAD)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is
|
||||
out (COMMS LEAD)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information
|
||||
(ADMIN)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)
|
||||
|
||||
#### After Release Time
|
||||
|
||||
@@ -116,22 +80,13 @@ multiple people can play each role and each person may play multiple roles.
|
||||
|
||||
## Supported Releases
|
||||
|
||||
The Tendermint Core team commits to releasing security patch releases for both
|
||||
the latest minor release as well for the major/minor release that the Cosmos Hub
|
||||
is running.
|
||||
The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running.
|
||||
|
||||
If you are running older versions of Tendermint Core, we encourage you to
|
||||
upgrade at your earliest opportunity so that you can receive security patches
|
||||
directly from the Tendermint repo. While you are welcome to backport security
|
||||
patches to older versions for your own use, we will not publish or promote these
|
||||
backports.
|
||||
If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
|
||||
|
||||
## Scope
|
||||
|
||||
The full scope of our bug bounty program is outlined on our
|
||||
[Hacker One program page][hackerone]. Please also note that, in the interest of
|
||||
the safety of our users and staff, a few things are explicitly excluded from
|
||||
scope:
|
||||
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
|
||||
|
||||
* Any third-party services
|
||||
* Findings from physical testing, such as office access
|
||||
@@ -139,9 +94,7 @@ scope:
|
||||
|
||||
## Example Vulnerabilities
|
||||
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re
|
||||
most interested in. It is not exhaustive: there are other kinds of issues we may
|
||||
also be interested in!
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!
|
||||
|
||||
### Specification
|
||||
|
||||
@@ -153,8 +106,7 @@ also be interested in!
|
||||
|
||||
Assuming less than 1/3 of the voting power is Byzantine (malicious):
|
||||
|
||||
* Validation of blockchain data structures, including blocks, block parts,
|
||||
votes, and so on
|
||||
* Validation of blockchain data structures, including blocks, block parts, votes, and so on
|
||||
* Execution of blocks
|
||||
* Validator set changes
|
||||
* Proposer round robin
|
||||
@@ -204,6 +156,3 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
* Core verification
|
||||
* Bisection/sequential algorithms
|
||||
|
||||
[hackerone]: https://hackerone.com/cosmos
|
||||
[tmsec-mailing]: https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc
|
||||
|
||||
@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig
|
||||
* Make use of table-driven testing where possible and not cumbersome (see the sketch after this list)
|
||||
* [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
|
||||
* Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
|
||||
* When using mocks, it is recommended to use Testify [mock](https://pkg.go.dev/github.com/stretchr/testify/mock) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
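
A minimal sketch of the table-driven style with `require`; the `abs` helper and the test values here are hypothetical, purely for illustration:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// abs is a hypothetical helper under test.
func abs(x int64) int64 {
	if x < 0 {
		return -x
	}
	return x
}

func TestAbs(t *testing.T) {
	testCases := []struct {
		name string
		in   int64
		want int64
	}{
		{"positive", 5, 5},
		{"zero", 0, 0},
		{"negative", -3, 3},
	}
	for _, tc := range testCases {
		tc := tc // capture range variable for the subtest closure
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.want, abs(tc.in))
		})
	}
}
```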
|
||||
|
||||
## Errors
|
||||
|
||||
147
UPGRADING.md
@@ -1,68 +1,85 @@
|
||||
# Upgrading Tendermint Core
|
||||
|
||||
This guide provides instructions for upgrading to specific versions of
|
||||
Tendermint Core.
|
||||
This guide provides instructions for upgrading to specific versions of Tendermint Core.
|
||||
|
||||
## Unreleased
|
||||
|
||||
## Config Changes
|
||||
|
||||
* A new config field, `BootstrapPeers`, has been introduced as a means of
adding a list of addresses to the address book upon initializing a node. This is an
alternative to `PersistentPeers`. `PersistentPeers` should only be used for
nodes that you want to keep a constant connection with, i.e. sentry nodes.
|
||||
|
||||
----
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
* The `ABCIVersion` is now `1.0.0`.
|
||||
* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid errors caused by unimplemented changes.
* The method `SetOption` has been removed from the ABCI `Client` interface. This feature was used in early ABCI implementations.
* Messages are written to a byte stream using uint64 length delimiters instead of int64.
|
||||
* When mempool `v1` is enabled, transactions broadcast via `sync` mode may return a successful
response with a transaction hash, indicating that the transaction was successfully inserted into
the mempool. While this is true for `v0`, the `v1` mempool reactor may at a later point in time
evict or even drop this transaction after a hash has been returned. Thus, the user or client must
query for that transaction to check whether it is still in the mempool (see the sketch after this list).
|
||||
|
||||
* Added new ABCI methods `PrepareProposal` and `ProcessProposal`. For details,
|
||||
please see the [spec](spec/abci/README.md). Applications upgrading to
|
||||
v0.37.0 must implement these methods, at the very minimum, as described
|
||||
[here](./spec/abci/abci++_app_requirements.md)
|
||||
* Deduplicated `ConsensusParams` and `BlockParams`.
|
||||
In the v0.34 branch they are defined both in `abci/types.proto` and `types/params.proto`.
|
||||
The definitions in `abci/types.proto` have been removed.
|
||||
In-process applications should make sure they are not using the deleted
|
||||
version of those structures.
|
||||
* In v0.34, messages on the wire used to be length-delimited with `int64` varint
|
||||
values, which was inconsistent with the `uint64` varint length delimiters used
|
||||
in the P2P layer. Both now consistently use `uint64` varint length delimiters.
|
||||
* Added `AbciVersion` to `RequestInfo`.
|
||||
Applications should check that Tendermint's ABCI version matches the one they expect
|
||||
in order to ensure compatibility.
|
||||
* The `SetOption` method has been removed from the ABCI `Client` interface.
|
||||
The corresponding Protobuf types have been deprecated.
|
||||
* The `key` and `value` fields in the `EventAttribute` type have been changed
|
||||
from type `bytes` to `string`. As per the [Protocol Buffers updating
|
||||
guidelines](https://developers.google.com/protocol-buffers/docs/proto3#updating),
|
||||
this should have no effect on the wire-level encoding for UTF8-encoded
|
||||
strings.
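
For the mempool `v1` note above, the following is a rough client-side sketch of broadcasting a transaction and later re-checking it through the RPC client. The endpoint, the transaction bytes, and the two-argument `New` constructor (which matches the v0.34-style `rpc/client/http` package) are assumptions, not prescriptions:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	client, err := rpchttp.New("http://localhost:26657", "/websocket")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// broadcast_tx_sync returns once CheckTx has passed; the returned hash only
	// means the transaction entered the mempool at that moment.
	res, err := client.BroadcastTxSync(ctx, types.Tx("example-tx"))
	if err != nil {
		panic(err)
	}
	if res.Code != 0 {
		panic(fmt.Sprintf("CheckTx rejected the transaction (code %d)", res.Code))
	}

	// Later, check whether the transaction was eventually committed. With the
	// v1 mempool it may have been evicted before inclusion, in which case the
	// lookup by hash returns an error.
	txRes, err := client.Tx(ctx, res.Hash, false)
	if err != nil {
		fmt.Println("tx not committed (possibly evicted from the mempool):", err)
		return
	}
	fmt.Println("tx committed at height", txRes.Height)
}
```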
|
||||
### Config Changes
|
||||
|
||||
## v0.34.24
|
||||
* `fast_sync = "v1"` and `fast_sync = "v2"` are no longer supported. Please use `v0` instead.
|
||||
|
||||
Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we
|
||||
un-prettified the JSON output (i.e. removed all indentation) of the HTTP and
|
||||
WebSocket RPC for performance and subscription stability reasons. We recommend
|
||||
using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified
|
||||
output if you rely on that prettified output in some way.
|
||||
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node, make sure
|
||||
you have updated all the variables in your `config.toml` file.
|
||||
|
||||
## v0.34.20
|
||||
* Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`)
|
||||
[ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
|
||||
|
||||
* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
`Seeds`. Bootstrap peers are connected to on startup if needed for peer discovery. Unlike
persistent peers, there's no guarantee that the node will remain connected with these peers.
|
||||
|
||||
### Feature: Priority Mempool
|
||||
* configuration values starting with `priv-validator-` have moved to the new
|
||||
`priv-validator` section, without the `priv-validator-` prefix.
|
||||
|
||||
This release backports an implementation of the Priority Mempool from the v0.35
|
||||
branch. This implementation of the mempool permits the application to set a
|
||||
priority on each transaction during CheckTx, and during block selection the
|
||||
highest-priority transactions are chosen (subject to the constraints on size
|
||||
and gas cost).
|
||||
* The fast sync process, along with the blockchain package and service, has
been renamed to block sync.
|
||||
|
||||
Operators can enable the priority mempool by setting `mempool.version` to
|
||||
`"v1"` in the `config.toml`. For more technical details about the priority
|
||||
mempool, see [ADR 067: Mempool
|
||||
Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-067-mempool-refactor.md).
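
As a rough sketch of how an application might assign priorities during CheckTx under this backport: the `feePriority` helper is a hypothetical placeholder, and `Priority` is the `ResponseCheckTx` field introduced for the v1 mempool.

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// Application embeds BaseApplication so the remaining ABCI methods fall back
// to the library defaults.
type Application struct {
	abci.BaseApplication
}

// feePriority is a hypothetical helper that derives a priority from the raw
// transaction bytes, e.g. by decoding a fee field.
func feePriority(tx []byte) int64 {
	return int64(len(tx)) // placeholder only
}

func (app *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	// With mempool.version = "v1", higher-priority transactions are selected
	// first when building blocks (subject to the size and gas constraints).
	return abci.ResponseCheckTx{
		Code:     0, // 0 accepts the transaction into the mempool
		Priority: feePriority(req.Tx),
	}
}
```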
|
||||
### CLI Changes
|
||||
|
||||
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
|
||||
|
||||
* If you had previously used `tendermint gen_node_key` to generate a new node
|
||||
key, keep in mind that it no longer saves the output to a file. You can use
|
||||
`tendermint init validator` or pipe the output of `tendermint gen_node_key` to
|
||||
`$TMHOME/config/node_key.json`:
|
||||
|
||||
```
|
||||
$ tendermint gen_node_key > $TMHOME/config/node_key.json
|
||||
```
|
||||
|
||||
* CLI commands and flags are all now hyphen-case instead of snake_case.
Make sure to adjust any scripts that call a CLI command using snake_case.
|
||||
|
||||
### API Changes
|
||||
|
||||
The p2p layer was reimplemented as part of the 0.35 release cycle, and
|
||||
all reactors were refactored. As part of that work these
|
||||
implementations moved into the `internal` package and are no longer
|
||||
considered part of the public Go API of tendermint. These packages
|
||||
are:
|
||||
|
||||
- `p2p`
|
||||
- `mempool`
|
||||
- `consensus`
|
||||
- `statesync`
|
||||
- `blockchain`
|
||||
- `evidence`
|
||||
|
||||
Accordingly, the `node` package was changed to reduce access to
|
||||
tendermint internals: applications that use tendermint as a library
|
||||
will need to change to accommodate these changes. Most notably:
|
||||
|
||||
- The `Node` type has become internal, and all constructors return a
|
||||
`service.Service` implementation.
|
||||
|
||||
- The `node.DefaultNewNode` and `node.NewNode` constructors are no
|
||||
longer exported and have been replaced with `node.New` and
|
||||
`node.NewDefault` which provide more functional interfaces.
|
||||
|
||||
### RPC changes
|
||||
|
||||
Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.
|
||||
|
||||
## v0.34.0
|
||||
|
||||
@@ -80,7 +97,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
|
||||
were added to support the new State Sync feature.
|
||||
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
|
||||
new nodes are able to join a network in a matter of seconds.
|
||||
Read [the spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#state-sync)
|
||||
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
|
||||
if you want to learn more about State Sync, or if you'd like your application to use it.
|
||||
(If you don't want to support State Sync in your application, you can just implement these new
|
||||
ABCI methods as no-ops, leaving them empty.)
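
A minimal sketch of the no-op implementations mentioned above, assuming an application struct of your own (method signatures follow the v0.34 `abci/types` `Application` interface; embedding `BaseApplication` already provides such defaults, so spelling them out here is purely illustrative):

```go
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

type Application struct {
	abci.BaseApplication // provides default implementations of the other ABCI methods
}

// The four State Sync methods can be left as no-ops if snapshots are not supported.

func (app *Application) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseListSnapshots {
	return abci.ResponseListSnapshots{} // no snapshots offered
}

func (app *Application) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
	return abci.ResponseOfferSnapshot{} // zero-value response; no snapshot is accepted
}

func (app *Application) LoadSnapshotChunk(req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk {
	return abci.ResponseLoadSnapshotChunk{} // no chunk data
}

func (app *Application) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
	return abci.ResponseApplySnapshotChunk{} // nothing applied
}
```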
|
||||
@@ -96,7 +113,7 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
|
||||
Applications should be able to handle these evidence types
|
||||
(i.e., through slashing or other accountability measures).
|
||||
|
||||
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/main/proto/tendermint/crypto/keys.proto#L13-L15)
|
||||
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/crypto/keys.proto#L13-L15)
|
||||
(used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
|
||||
Note that since Tendermint only supports ed25519 validator keys, there's only one
|
||||
option in the `oneof`. For more, see "Protocol Buffers," below.
|
||||
@@ -104,6 +121,8 @@ Note also that Tendermint 0.34 also requires Go 1.16 or higher.
|
||||
* The field `Proof`, on the ABCI type `ResponseQuery`, is now named `ProofOps`.
|
||||
For more, see "Crypto," below.
|
||||
|
||||
* The method `SetOption` has been removed from the ABCI `Client` interface. This feature was used in early ABCI implementations.
|
||||
|
||||
### P2P Protocol
|
||||
|
||||
The default codec is now proto3, not amino. The schema files can be found in the `/proto`
|
||||
@@ -112,8 +131,8 @@ directory. For more, see "Protobuf," below.
|
||||
### Blockchain Protocol
|
||||
|
||||
* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
|
||||
`ResponseDeliverTx(Code, Data)` as of v0.34 also includes `GasWanted` and `GasUsed`
|
||||
fields.
|
||||
`ResponseDeliverTx(Code, Data)` as of v0.34 also includes `GasWanted` and `GasUsed`
|
||||
fields.
|
||||
|
||||
* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
|
||||
to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).
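
A small sketch illustrating the empty-tree behavior described above, assuming the `crypto/merkle` package's `HashFromByteSlices` helper (on older releases the function carried a `Simple` prefix):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	// Per RFC 6962, the Merkle root of an empty tree is the hash of the empty
	// input rather than an empty byte slice.
	root := merkle.HashFromByteSlices(nil)
	empty := sha256.Sum256(nil)
	fmt.Println("root matches SHA-256 of empty input:", bytes.Equal(root, empty[:]))
}
```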
|
||||
@@ -203,7 +222,7 @@ The `bech32` package has moved to the Cosmos SDK:
|
||||
### CLI
|
||||
|
||||
The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API.
|
||||
See [the docs](https://docs.tendermint.com/v0.33/tendermint-core/light-client-protocol.html#http-proxy) for details.
|
||||
See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html#http-proxy) for details.
|
||||
|
||||
### Light Client
|
||||
|
||||
@@ -217,7 +236,7 @@ Other user-relevant changes include:
|
||||
* The `Verifier` was broken up into two pieces:
|
||||
* Core verification logic (pure `VerifyX` functions)
|
||||
* `Client` object, which represents the complete light client
|
||||
* The new light client stores headers and validator sets as `LightBlock`s
|
||||
* The new light clients stores headers & validator sets as `LightBlock`s
|
||||
* The RPC client can be found in the `/rpc` directory.
|
||||
* The HTTP(S) proxy is located in the `/proxy` directory.
|
||||
|
||||
@@ -359,7 +378,7 @@ Evidence Params has been changed to include duration.
|
||||
### RPC Changes
|
||||
|
||||
* `/validators` is now paginated (default: 30 vals per page)
|
||||
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/v0.33/rpc/#/Info/block_results)
|
||||
* `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/master/rpc/#/Info/block_results)
|
||||
* Event suffix has been removed from the ID in event responses
|
||||
* IDs are now integers not `json-client-XYZ`
|
||||
|
||||
@@ -478,11 +497,11 @@ the compilation tag:
|
||||
|
||||
Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
|
||||
use `make build_c` / `make install_c` (full instructions can be found at
|
||||
<https://docs.tendermint.com/v0.33/introduction/install.html#compile-with-cleveldb-support>)
|
||||
<https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support>)
|
||||
|
||||
## v0.31.0
|
||||
|
||||
This release contains a breaking change to the behavior of the pubsub system.
|
||||
This release contains a breaking change to the behaviour of the pubsub system.
|
||||
It also contains some minor breaking changes in the Go API and ABCI.
|
||||
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
|
||||
with blockchains created from the v0.30 series.
|
||||
@@ -553,14 +572,14 @@ due to changes in how various data structures are hashed.
|
||||
Any implementations of Tendermint blockchain verification, including lite clients,
|
||||
will need to be updated. For specific details:
|
||||
|
||||
* [Merkle tree](https://github.com/tendermint/tendermint/blob/main/spec/blockchain/encoding.md#merkle-trees)
|
||||
* [ConsensusParams](https://github.com/tendermint/tendermint/blob/main/spec/blockchain/state.md#consensusparams)
|
||||
* [Merkle tree](https://github.com/tendermint/spec/blob/master/spec/blockchain/encoding.md#merkle-trees)
|
||||
* [ConsensusParams](https://github.com/tendermint/spec/blob/master/spec/blockchain/state.md#consensusparams)
|
||||
|
||||
There was also a small change to field ordering in the vote struct. Any
|
||||
implementations of an out-of-process validator (like a Key-Management Server)
|
||||
will need to be updated. For specific details:
|
||||
|
||||
* [Vote](https://github.com/tendermint/tendermint/blob/main/spec/consensus/signing.md#votes)
|
||||
* [Vote](https://github.com/tendermint/spec/blob/master/spec/consensus/signing.md#votes)
|
||||
|
||||
Finally, the proposer selection algorithm continues to evolve. See the
|
||||
[work-in-progress
|
||||
@@ -681,7 +700,7 @@ to `timeout_propose = "3s"`.
|
||||
|
||||
### RPC Changes
|
||||
|
||||
The default behavior of `/abci_query` has been changed to not return a proof,
|
||||
The default behaviour of `/abci_query` has been changed to not return a proof,
|
||||
and the name of the parameter that controls this has been changed from `trusted`
|
||||
to `prove`. To get proofs with your queries, ensure you set `prove=true`.
|
||||
|
||||
|
||||
66
Vagrantfile
vendored
@@ -1,66 +0,0 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "ubuntu/focal64"
|
||||
|
||||
config.vm.provider "virtualbox" do |v|
|
||||
v.memory = 4096
|
||||
v.cpus = 2
|
||||
end
|
||||
|
||||
config.vm.provision "shell", inline: <<-SHELL
|
||||
apt-get update
|
||||
|
||||
# install base requirements
|
||||
apt-get install -y --no-install-recommends wget curl jq zip \
|
||||
make shellcheck bsdmainutils psmisc
|
||||
apt-get install -y language-pack-en
|
||||
|
||||
# install docker
|
||||
apt-get install -y --no-install-recommends apt-transport-https \
|
||||
ca-certificates \
|
||||
curl \
|
||||
gnupg-agent \
|
||||
software-properties-common
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) \
|
||||
stable"
|
||||
apt-get update
|
||||
apt-get install -y docker-ce
|
||||
usermod -aG docker vagrant
|
||||
|
||||
# install go
|
||||
wget -q https://dl.google.com/go/go1.15.linux-amd64.tar.gz
|
||||
tar -xvf go1.15.linux-amd64.tar.gz
|
||||
mv go /usr/local
|
||||
rm -f go1.15.linux-amd64.tar.gz
|
||||
|
||||
# install nodejs (for docs)
|
||||
curl -sL https://deb.nodesource.com/setup_11.x | bash -
|
||||
apt-get install -y nodejs
|
||||
|
||||
# cleanup
|
||||
apt-get autoremove -y
|
||||
|
||||
# set env variables
|
||||
echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
|
||||
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
|
||||
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
|
||||
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
|
||||
echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
|
||||
|
||||
mkdir -p /home/vagrant/go/bin
|
||||
mkdir -p /home/vagrant/go/src/github.com/tendermint
|
||||
ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
|
||||
|
||||
chown -R vagrant:vagrant /home/vagrant/go
|
||||
chown vagrant:vagrant /home/vagrant/.bash_profile
|
||||
|
||||
# get all deps and tools, ready to install/test
|
||||
su - vagrant -c 'source /home/vagrant/.bash_profile'
|
||||
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools'
|
||||
SHELL
|
||||
end
|
||||
@@ -19,8 +19,8 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
|
||||
|
||||
A detailed description of the ABCI methods and message types is contained in:
|
||||
|
||||
- [The main spec](https://github.com/tendermint/tendermint/blob/main/spec/abci/README.md)
|
||||
- [A protobuf file](../proto/tendermint/types/types.proto)
|
||||
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
|
||||
- [A protobuf file](../proto/tendermint/abci/types.proto)
|
||||
- [A Go interface](./types/application.go)
|
||||
|
||||
## Protocol Buffers
|
||||
|
||||
@@ -6,8 +6,8 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -15,30 +15,53 @@ const (
|
||||
echoRetryIntervalSeconds = 1
|
||||
)
|
||||
|
||||
//go:generate ../../scripts/mockery_generate.sh Client
|
||||
//go:generate mockery --case underscore --name Client
|
||||
|
||||
// Client defines the interface for an ABCI client.
|
||||
// Client defines an interface for an ABCI client.
|
||||
//
|
||||
// All `Async` methods return a `ReqRes` object and an error.
|
||||
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
|
||||
//
|
||||
// NOTE these are client errors, eg. ABCI socket connectivity issues.
|
||||
// Application-related errors are reflected in response via ABCI error codes
|
||||
// and (potentially) error response.
|
||||
// and logs.
|
||||
type Client interface {
|
||||
service.Service
|
||||
types.Application
|
||||
|
||||
// TODO: remove as each method now returns an error
|
||||
Error() error
|
||||
// TODO: remove as this is not implemented
|
||||
Flush(context.Context) error
|
||||
Echo(context.Context, string) (*types.ResponseEcho, error)
|
||||
|
||||
// FIXME: All other operations are run synchronously and rely
|
||||
// on the caller to dictate concurrency (i.e. run a go routine),
|
||||
// with the exception of `CheckTxAsync` which we maintain
|
||||
// for the v0 mempool. We should explore refactoring the
|
||||
// mempool to remove this vestigial behavior.
|
||||
SetResponseCallback(Callback)
|
||||
CheckTxAsync(context.Context, *types.RequestCheckTx) (*ReqRes, error)
|
||||
Error() error
|
||||
|
||||
// Asynchronous requests
|
||||
FlushAsync(context.Context) (*ReqRes, error)
|
||||
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
|
||||
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
|
||||
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
|
||||
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
|
||||
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
|
||||
CommitAsync(context.Context) (*ReqRes, error)
|
||||
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
|
||||
BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
|
||||
EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
|
||||
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
|
||||
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
|
||||
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
|
||||
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
|
||||
|
||||
// Synchronous requests
|
||||
FlushSync(context.Context) error
|
||||
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
|
||||
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
|
||||
DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
|
||||
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
|
||||
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
|
||||
CommitSync(context.Context) (*types.ResponseCommit, error)
|
||||
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
|
||||
BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
|
||||
EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
|
||||
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
||||
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
||||
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
||||
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
||||
}
|
||||
|
||||
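To make the Async/Sync split above concrete, here is a small illustrative sketch, not taken from this changeset, of how a caller would drive both paths. It assumes a Client that has already been constructed and started, and uses the value-typed RequestCheckTx signatures listed under "Asynchronous requests" and "Synchronous requests" above (the interface is mid-refactor, so the pointer-typed variants also appear in the diff).

package clientdemo

import (
	"context"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// demo is illustrative only: cli is any started abcicli.Client.
func demo(ctx context.Context, cli abcicli.Client) error {
	// Synchronous path: returns the protobuf response directly.
	res, err := cli.CheckTxSync(ctx, types.RequestCheckTx{Tx: []byte("tx")})
	if err != nil {
		return err // client error, e.g. socket connectivity
	}
	_ = res.Code // application errors surface as ABCI response codes

	// Asynchronous path: returns a *ReqRes; the callback fires once the
	// response has been dispatched (this is what the v0 mempool relies on).
	reqRes, err := cli.CheckTxAsync(ctx, types.RequestCheckTx{Tx: []byte("tx")})
	if err != nil {
		return err
	}
	reqRes.SetCallback(func(r *types.Response) {
		_ = r.GetCheckTx()
	})
	return nil
}
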
//----------------------------------------

@@ -64,15 +87,9 @@ type ReqRes struct {
	*sync.WaitGroup
	*types.Response // Not set atomically, so be sure to use WaitGroup.

	mtx tmsync.Mutex

	// callbackInvoked as a variable to track if the callback was already
	// invoked during the regular execution of the request. This variable
	// allows clients to set the callback simultaneously without potentially
	// invoking the callback twice by accident, once when 'SetCallback' is
	// called and once during the normal request.
	callbackInvoked bool
	cb func(*types.Response) // A single callback that may be set.
	mtx tmsync.RWMutex
	done bool // Gets set to true once *after* WaitGroup.Done().
	cb func(*types.Response) // A single callback that may be set.
}

func NewReqRes(req *types.Request) *ReqRes {
@@ -81,8 +98,8 @@ func NewReqRes(req *types.Request) *ReqRes {
		WaitGroup: waitGroup1(),
		Response: nil,

		callbackInvoked: false,
		cb: nil,
		done: false,
		cb: nil,
	}
}

@@ -92,7 +109,7 @@ func NewReqRes(req *types.Request) *ReqRes {
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
	r.mtx.Lock()

	if r.callbackInvoked {
	if r.done {
		r.mtx.Unlock()
		cb(r.Response)
		return
@@ -111,7 +128,6 @@ func (r *ReqRes) InvokeCallback() {
	if r.cb != nil {
		r.cb(r.Response)
	}
	r.callbackInvoked = true
}

// GetCallback returns the configured callback of the ReqRes object which may be
@@ -121,9 +137,16 @@ func (r *ReqRes) InvokeCallback() {
//
// ref: https://github.com/tendermint/tendermint/issues/5439
func (r *ReqRes) GetCallback() func(*types.Response) {
	r.mtx.RLock()
	defer r.mtx.RUnlock()
	return r.cb
}

// SetDone marks the ReqRes object as done.
func (r *ReqRes) SetDone() {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.cb
	r.done = true
}

func waitGroup1() (wg *sync.WaitGroup) {

abci/client/doc.go (new file, 29 lines)
@@ -0,0 +1,29 @@
// Package abcicli provides an ABCI implementation in Go.
//
// There are 3 clients available:
//   1. socket (unix or TCP)
//   2. local (in memory)
//   3. gRPC
//
// ## Socket client
//
// async: the client maintains an internal buffer of a fixed size. when the
// buffer becomes full, all Async calls will return an error immediately.
//
// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the
// Flush requests 3) waiting for the Flush response
//
// ## Local client
//
// async: global mutex is locked during each call (meaning it's not really async!)
// sync: global mutex is locked during each call
//
// ## gRPC client
//
// async: gRPC is synchronous, but an internal buffer of a fixed size is used
// to store responses and later call callbacks (separate goroutine per
// response).
//
// sync: waits for all Async calls to complete (essentially what Flush does in
// the socket client) and calls Sync method.
package abcicli

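As a rough usage sketch to go with the package comment above (not part of the new file): the gRPC flavour is built from an address and a mustConnect flag and dials on Start. The unix-socket address below is only an example, and the socket and local constructors mentioned in the comments are assumed to keep their existing NewSocketClient/NewLocalClient shapes.

package clientdemo

import (
	abcicli "github.com/tendermint/tendermint/abci/client"
)

// newGRPCClient is a sketch: with mustConnect=false, Start does not fail on
// the first dial error and the client keeps retrying in the background.
// The socket client (abcicli.NewSocketClient) and the local client
// (abcicli.NewLocalClient, shown later in this changeset) are constructed
// similarly and then started through the common service.Service interface.
func newGRPCClient() (abcicli.Client, error) {
	cli := abcicli.NewGRPCClient("unix:///tmp/abci.sock", false)
	if err := cli.Start(); err != nil {
		return nil, err
	}
	return cli, nil
}
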
@@ -8,31 +8,40 @@ import (
	"time"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
)

var _ Client = (*grpcClient)(nil)

// A stripped copy of the remoteClient that makes
// synchronous calls using grpc
// A gRPC client.
type grpcClient struct {
	service.BaseService
	mustConnect bool

	client types.ABCIClient
	client types.ABCIApplicationClient
	conn *grpc.ClientConn
	chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool

	mtx sync.Mutex
	mtx tmsync.RWMutex
	addr string
	err error
	resCb func(*types.Request, *types.Response) // listens to all callbacks
}

var _ Client = (*grpcClient)(nil)

// NewGRPCClient creates a gRPC client, which will connect to addr upon the
// start. Note Client#Start returns an error if connection is unsuccessful and
// mustConnect is true.
//
// GRPC calls are synchronous, but some callbacks expect to be called
// asynchronously (eg. the mempool expects to be able to lock to remove bad txs
// from cache). To accommodate, we finish each call in its own go-routine,
// which is expensive, but easy - if you want something better, use the socket
// protocol! maybe one day, if people really want it, we use grpc streams, but
// hopefully not :D
func NewGRPCClient(addr string, mustConnect bool) Client {
	cli := &grpcClient{
		addr: addr,
@@ -54,10 +63,6 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
}

func (cli *grpcClient) OnStart() error {
	if err := cli.BaseService.OnStart(); err != nil {
		return err
	}

	// This processes asynchronous request/response messages and dispatches
	// them to callbacks.
	go func() {
@@ -66,6 +71,7 @@ func (cli *grpcClient) OnStart() error {
			cli.mtx.Lock()
			defer cli.mtx.Unlock()

			reqres.SetDone()
			reqres.Done()

			// Notify client listener if set
@@ -74,7 +80,9 @@ func (cli *grpcClient) OnStart() error {
			}

			// Notify reqRes listener if set
			reqres.InvokeCallback()
			if cb := reqres.GetCallback(); cb != nil {
				cb(reqres.Response)
			}
		}
		for reqres := range cli.chReqRes {
			if reqres != nil {
@@ -87,10 +95,7 @@ func (cli *grpcClient) OnStart() error {

RETRY_LOOP:
	for {
		conn, err := grpc.Dial(cli.addr,
			grpc.WithTransportCredentials(insecure.NewCredentials()),
			grpc.WithContextDialer(dialerFunc),
		)
		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
		if err != nil {
			if cli.mustConnect {
				return err
@@ -101,7 +106,7 @@ RETRY_LOOP:
		}

		cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
		client := types.NewABCIClient(conn)
		client := types.NewABCIApplicationClient(conn)
		cli.conn = conn

	ENSURE_CONNECTED:
@@ -120,8 +125,6 @@ RETRY_LOOP:
}

func (cli *grpcClient) OnStop() {
	cli.BaseService.OnStop()

	if cli.conn != nil {
		cli.conn.Close()
	}
@@ -146,8 +149,8 @@ func (cli *grpcClient) StopForError(err error) {
}

func (cli *grpcClient) Error() error {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.mtx.RLock()
	defer cli.mtx.RUnlock()
	return cli.err
}

@@ -155,93 +158,349 @@ func (cli *grpcClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.resCb = resCb
	cli.mtx.Unlock()
}

//----------------------------------------

func (cli *grpcClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) {
	res, err := cli.client.CheckTx(ctx, req, grpc.WaitForReady(true))
// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
	req := types.ToRequestEcho(msg)
	res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
		return nil, err
	}
	return cli.finishAsyncCall(types.ToRequestCheckTx(req), &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}), nil
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
	req := types.ToRequestFlush()
	res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
	req := types.ToRequestInfo(params)
	res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
	req := types.ToRequestDeliverTx(params)
	res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
	req := types.ToRequestCheckTx(params)
	res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
	req := types.ToRequestQuery(params)
	res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
	req := types.ToRequestCommit()
	res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
	req := types.ToRequestInitChain(params)
	res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
	req := types.ToRequestBeginBlock(params)
	res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
	req := types.ToRequestEndBlock(params)
	res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
	req := types.ToRequestListSnapshots(params)
	res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
	req := types.ToRequestOfferSnapshot(params)
	res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) LoadSnapshotChunkAsync(
	ctx context.Context,
	params types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
	req := types.ToRequestLoadSnapshotChunk(params)
	res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ApplySnapshotChunkAsync(
	ctx context.Context,
	params types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
	req := types.ToRequestApplySnapshotChunk(params)
	res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
	}
	return cli.finishAsyncCall(
		ctx,
		req,
		&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
	)
}

// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
	reqres := NewReqRes(req)
	reqres.Response = res
	cli.chReqRes <- reqres // use channel for async responses, since they must be ordered
	return reqres
	select {
	case cli.chReqRes <- reqres: // use channel for async responses, since they must be ordered
		return reqres, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// finishSyncCall waits for an async call to complete. It is necessary to call all
// sync calls asynchronously as well, to maintain call and response ordering via
// the channel, and this method will wait until the async call completes.
func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
	// It's possible that the callback is called twice, since the callback can
	// be called immediately on SetCallback() in addition to after it has been
	// set. This is because completing the ReqRes happens in a separate critical
	// section from the one where the callback is called: there is a race where
	// SetCallback() is called between completing the ReqRes and dispatching the
	// callback.
	//
	// We also buffer the channel with 1 response, since SetCallback() will be
	// called synchronously if the reqres is already completed, in which case
	// it will block on sending to the channel since it hasn't gotten around to
	// receiving from it yet.
	//
	// ReqRes should really handle callback dispatch internally, to guarantee
	// that it's only called once and avoid the above race conditions.
	var once sync.Once
	ch := make(chan *types.Response, 1)
	reqres.SetCallback(func(res *types.Response) {
		once.Do(func() {
			ch <- res
		})
	})
	return <-ch
}

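Putting the two helpers above together, the caller-facing Sync methods below are just an Async call plus a wait. A hypothetical caller-side sketch of that flow (not part of the diff), assuming a started gRPC-backed client:

package clientdemo

import (
	"context"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

// echoOnce is a sketch: EchoSync performs the gRPC call inside EchoAsync,
// queues the ReqRes on chReqRes so responses stay ordered, and then blocks in
// finishSyncCall until the dispatch goroutine invokes the callback.
func echoOnce(ctx context.Context, cli abcicli.Client) (string, error) {
	res, err := cli.EchoSync(ctx, "ping")
	if err != nil {
		return "", err
	}
	return res.Message, cli.Error()
}
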
//----------------------------------------

func (cli *grpcClient) Flush(ctx context.Context) error {
	_, err := cli.client.Flush(ctx, types.ToRequestFlush().GetFlush(), grpc.WaitForReady(true))
	return err
func (cli *grpcClient) FlushSync(ctx context.Context) error {
	return nil
}

func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	return cli.client.Echo(ctx, types.ToRequestEcho(msg).GetEcho(), grpc.WaitForReady(true))
func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	reqres, err := cli.EchoAsync(ctx, msg)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
}

func (cli *grpcClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
	return cli.client.Info(ctx, req, grpc.WaitForReady(true))
func (cli *grpcClient) InfoSync(
	ctx context.Context,
	req types.RequestInfo,
) (*types.ResponseInfo, error) {
	reqres, err := cli.InfoAsync(ctx, req)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
}

func (cli *grpcClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	return cli.client.CheckTx(ctx, req, grpc.WaitForReady(true))
func (cli *grpcClient) DeliverTxSync(
	ctx context.Context,
	params types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {

	reqres, err := cli.DeliverTxAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
}

func (cli *grpcClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
	return cli.client.Query(ctx, types.ToRequestQuery(req).GetQuery(), grpc.WaitForReady(true))
func (cli *grpcClient) CheckTxSync(
	ctx context.Context,
	params types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {

	reqres, err := cli.CheckTxAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
}

func (cli *grpcClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
	return cli.client.Commit(ctx, types.ToRequestCommit().GetCommit(), grpc.WaitForReady(true))
func (cli *grpcClient) QuerySync(
	ctx context.Context,
	req types.RequestQuery,
) (*types.ResponseQuery, error) {
	reqres, err := cli.QueryAsync(ctx, req)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
}

func (cli *grpcClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
	return cli.client.InitChain(ctx, types.ToRequestInitChain(req).GetInitChain(), grpc.WaitForReady(true))
func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
	reqres, err := cli.CommitAsync(ctx)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
}

func (cli *grpcClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	return cli.client.ListSnapshots(ctx, types.ToRequestListSnapshots(req).GetListSnapshots(), grpc.WaitForReady(true))
func (cli *grpcClient) InitChainSync(
	ctx context.Context,
	params types.RequestInitChain,
) (*types.ResponseInitChain, error) {

	reqres, err := cli.InitChainAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
}

func (cli *grpcClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	return cli.client.OfferSnapshot(ctx, types.ToRequestOfferSnapshot(req).GetOfferSnapshot(), grpc.WaitForReady(true))
func (cli *grpcClient) BeginBlockSync(
	ctx context.Context,
	params types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {

	reqres, err := cli.BeginBlockAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
}

func (cli *grpcClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	return cli.client.LoadSnapshotChunk(ctx, types.ToRequestLoadSnapshotChunk(req).GetLoadSnapshotChunk(), grpc.WaitForReady(true))
func (cli *grpcClient) EndBlockSync(
	ctx context.Context,
	params types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {

	reqres, err := cli.EndBlockAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
}

func (cli *grpcClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	return cli.client.ApplySnapshotChunk(ctx, types.ToRequestApplySnapshotChunk(req).GetApplySnapshotChunk(), grpc.WaitForReady(true))
func (cli *grpcClient) ListSnapshotsSync(
	ctx context.Context,
	params types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {

	reqres, err := cli.ListSnapshotsAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
}

func (cli *grpcClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	return cli.client.PrepareProposal(ctx, types.ToRequestPrepareProposal(req).GetPrepareProposal(), grpc.WaitForReady(true))
func (cli *grpcClient) OfferSnapshotSync(
	ctx context.Context,
	params types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {

	reqres, err := cli.OfferSnapshotAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
}

func (cli *grpcClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	return cli.client.ProcessProposal(ctx, types.ToRequestProcessProposal(req).GetProcessProposal(), grpc.WaitForReady(true))
func (cli *grpcClient) LoadSnapshotChunkSync(
	ctx context.Context,
	params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {

	reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
}

func (cli *grpcClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	return cli.client.ExtendVote(ctx, types.ToRequestExtendVote(req).GetExtendVote(), grpc.WaitForReady(true))
}
func (cli *grpcClient) ApplySnapshotChunkSync(
	ctx context.Context,
	params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {

func (cli *grpcClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	return cli.client.VerifyVoteExtension(ctx, types.ToRequestVerifyVoteExtension(req).GetVerifyVoteExtension(), grpc.WaitForReady(true))
}

func (cli *grpcClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	return cli.client.FinalizeBlock(ctx, types.ToRequestFinalizeBlock(req).GetFinalizeBlock(), grpc.WaitForReady(true))
	reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
	if err != nil {
		return nil, err
	}
	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
}

@@ -1,80 +0,0 @@
package abcicli_test

import (
	"fmt"
	"math/rand"
	"net"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"google.golang.org/grpc"

	"golang.org/x/net/context"

	"github.com/tendermint/tendermint/libs/log"
	tmnet "github.com/tendermint/tendermint/libs/net"

	abciserver "github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
)

func TestGRPC(t *testing.T) {
	app := types.NewBaseApplication()
	numCheckTxs := 2000
	socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
	defer os.Remove(socketFile)
	socket := fmt.Sprintf("unix://%v", socketFile)

	// Start the listener
	server := abciserver.NewGRPCServer(socket, app)
	server.SetLogger(log.TestingLogger().With("module", "abci-server"))
	err := server.Start()
	require.NoError(t, err)

	t.Cleanup(func() {
		if err := server.Stop(); err != nil {
			t.Error(err)
		}
	})

	// Connect to the socket
	//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
	conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
	require.NoError(t, err)

	t.Cleanup(func() {
		if err := conn.Close(); err != nil {
			t.Error(err)
		}
	})

	client := types.NewABCIClient(conn)

	// Write requests
	for counter := 0; counter < numCheckTxs; counter++ {
		// Send request
		response, err := client.CheckTx(context.Background(), &types.RequestCheckTx{Tx: []byte("test")})
		require.NoError(t, err)
		counter++
		if response.Code != 0 {
			t.Error("CheckTx failed with ret_code", response.Code)
		}
		if counter > numCheckTxs {
			t.Fatal("Too many CheckTx responses")
		}
		t.Log("response", counter)
		if counter == numCheckTxs {
			go func() {
				time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
			}()
		}

	}
}

func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
	return tmnet.Connect(addr)
}

@@ -4,8 +4,8 @@ import (
	"context"

	types "github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/libs/service"
	tmsync "github.com/tendermint/tendermint/libs/sync"
)

// NOTE: use defer to unlock mutex because Application might panic (e.g., in
@@ -15,172 +15,347 @@ import (
type localClient struct {
	service.BaseService

	mtx *tmsync.Mutex
	mtx *tmsync.RWMutex
	types.Application
	Callback
}

var _ Client = (*localClient)(nil)

// NewLocalClient creates a local client, which wraps the application interface that
// Tendermint as the client will call to the application as the server. The only
// difference, is that the local client has a global mutex which enforces serialization
// of all the ABCI calls from Tendermint to the Application.
func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
// NewLocalClient creates a local client, which will be directly calling the
// methods of the given app.
//
// Both Async and Sync methods ignore the given context.Context parameter.
func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client {
	if mtx == nil {
		mtx = new(tmsync.Mutex)
		mtx = &tmsync.RWMutex{}
	}

	cli := &localClient{
		mtx: mtx,
		Application: app,
	}

	cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
	return cli
}

func (app *localClient) SetResponseCallback(cb Callback) {
	app.mtx.Lock()
	defer app.mtx.Unlock()
	app.Callback = cb
	app.mtx.Unlock()
}

func (app *localClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) {
// TODO: change types.Application to include Error()?
func (app *localClient) Error() error {
	return nil
}

func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
	// Do nothing
	return newLocalReqRes(types.ToRequestFlush(), nil), nil
}

func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res, err := app.Application.CheckTx(ctx, req)
	if err != nil {
		return nil, err
	}
	return app.callback(
		types.ToRequestEcho(msg),
		types.ToResponseEcho(msg),
	), nil
}

func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()

	res := app.Application.Info(req)
	return app.callback(
		types.ToRequestInfo(req),
		types.ToResponseInfo(res),
	), nil
}

func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.DeliverTx(params)
	return app.callback(
		types.ToRequestDeliverTx(params),
		types.ToResponseDeliverTx(res),
	), nil
}

func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.CheckTx(req)
	return app.callback(
		types.ToRequestCheckTx(req),
		types.ToResponseCheckTx(res),
	), nil
}

func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()

	res := app.Application.Query(req)
	return app.callback(
		types.ToRequestQuery(req),
		types.ToResponseQuery(res),
	), nil
}

func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Commit()
	return app.callback(
		types.ToRequestCommit(),
		types.ToResponseCommit(res),
	), nil
}

func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.InitChain(req)
	return app.callback(
		types.ToRequestInitChain(req),
		types.ToResponseInitChain(res),
	), nil
}

func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.BeginBlock(req)
	return app.callback(
		types.ToRequestBeginBlock(req),
		types.ToResponseBeginBlock(res),
	), nil
}

func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.EndBlock(req)
	return app.callback(
		types.ToRequestEndBlock(req),
		types.ToResponseEndBlock(res),
	), nil
}

func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ListSnapshots(req)
	return app.callback(
		types.ToRequestListSnapshots(req),
		types.ToResponseListSnapshots(res),
	), nil
}

func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.OfferSnapshot(req)
	return app.callback(
		types.ToRequestOfferSnapshot(req),
		types.ToResponseOfferSnapshot(res),
	), nil
}

func (app *localClient) LoadSnapshotChunkAsync(
	ctx context.Context,
	req types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.LoadSnapshotChunk(req)
	return app.callback(
		types.ToRequestLoadSnapshotChunk(req),
		types.ToResponseLoadSnapshotChunk(res),
	), nil
}

func (app *localClient) ApplySnapshotChunkAsync(
	ctx context.Context,
	req types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ApplySnapshotChunk(req)
	return app.callback(
		types.ToRequestApplySnapshotChunk(req),
		types.ToResponseApplySnapshotChunk(res),
	), nil
}

//-------------------------------------------------------

func (app *localClient) FlushSync(ctx context.Context) error {
	return nil
}

func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	return &types.ResponseEcho{Message: msg}, nil
}

func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()

	res := app.Application.Info(req)
	return &res, nil
}

func (app *localClient) DeliverTxSync(
	ctx context.Context,
	req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.DeliverTx(req)
	return &res, nil
}

func (app *localClient) CheckTxSync(
	ctx context.Context,
	req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.CheckTx(req)
	return &res, nil
}

func (app *localClient) QuerySync(
	ctx context.Context,
	req types.RequestQuery,
) (*types.ResponseQuery, error) {
	app.mtx.RLock()
	defer app.mtx.RUnlock()

	res := app.Application.Query(req)
	return &res, nil
}

func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Commit()
	return &res, nil
}

func (app *localClient) InitChainSync(
	ctx context.Context,
	req types.RequestInitChain,
) (*types.ResponseInitChain, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.InitChain(req)
	return &res, nil
}

func (app *localClient) BeginBlockSync(
	ctx context.Context,
	req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.BeginBlock(req)
	return &res, nil
}

func (app *localClient) EndBlockSync(
	ctx context.Context,
	req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.EndBlock(req)
	return &res, nil
}

func (app *localClient) ListSnapshotsSync(
	ctx context.Context,
	req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ListSnapshots(req)
	return &res, nil
}

func (app *localClient) OfferSnapshotSync(
	ctx context.Context,
	req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.OfferSnapshot(req)
	return &res, nil
}

func (app *localClient) LoadSnapshotChunkSync(
	ctx context.Context,
	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.LoadSnapshotChunk(req)
	return &res, nil
}

func (app *localClient) ApplySnapshotChunkSync(
	ctx context.Context,
	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {

	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.ApplySnapshotChunk(req)
	return &res, nil
}

//-------------------------------------------------------

func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
	app.Callback(req, res)
	rr := newLocalReqRes(req, res)
	rr.callbackInvoked = true
	return rr
	return newLocalReqRes(req, res)
}

func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
	reqRes := NewReqRes(req)
	reqRes.Response = res
	reqRes.SetDone()
	return reqRes
}

//-------------------------------------------------------

func (app *localClient) Error() error {
	return nil
}

func (app *localClient) Flush(context.Context) error {
	return nil
}

func (app *localClient) Echo(_ context.Context, msg string) (*types.ResponseEcho, error) {
	return &types.ResponseEcho{Message: msg}, nil
}

func (app *localClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.Info(ctx, req)
}

func (app *localClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.CheckTx(ctx, req)
}

func (app *localClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.Query(ctx, req)
}

func (app *localClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.Commit(ctx, req)
}

func (app *localClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.InitChain(ctx, req)
}

func (app *localClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.ListSnapshots(ctx, req)
}

func (app *localClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.OfferSnapshot(ctx, req)
}

func (app *localClient) LoadSnapshotChunk(ctx context.Context,
	req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.LoadSnapshotChunk(ctx, req)
}

func (app *localClient) ApplySnapshotChunk(ctx context.Context,
	req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.ApplySnapshotChunk(ctx, req)
}

func (app *localClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.PrepareProposal(ctx, req)
}

func (app *localClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.ProcessProposal(ctx, req)
}

func (app *localClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.ExtendVote(ctx, req)
}

func (app *localClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.VerifyVoteExtension(ctx, req)
}

func (app *localClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	return app.Application.FinalizeBlock(ctx, req)
}

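The local client above is the in-process path, with every ABCI call serialized through the shared mutex. A minimal wiring sketch, not part of the diff, assuming the post-refactor NewLocalClient(*tmsync.RWMutex, types.Application) signature and the internal/libs/sync import path that appear in this changeset:

package clientdemo

import (
	"context"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
)

// checkTxLocally is a sketch: it serializes the ABCI call through the
// client's RWMutex and invokes the application method directly, with no
// socket or gRPC hop in between.
func checkTxLocally(ctx context.Context, app types.Application, tx []byte) (uint32, error) {
	cli := abcicli.NewLocalClient(&tmsync.RWMutex{}, app)
	if err := cli.Start(); err != nil {
		return 0, err
	}
	defer cli.Stop() // error ignored in this sketch

	res, err := cli.CheckTxSync(ctx, types.RequestCheckTx{Tx: tx})
	if err != nil {
		return 0, err
	}
	return res.Code, nil
}
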
@@ -1,4 +1,4 @@
// Code generated by mockery. DO NOT EDIT.
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.

package mocks

@@ -19,21 +19,21 @@ type Client struct {
	mock.Mock
}

// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -42,21 +42,67 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestAppl
	return r0, r1
}

// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
	var r0 *types.ResponseApplySnapshotChunk
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCheckTx)
			r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// BeginBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// BeginBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseBeginBlock
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseBeginBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -66,11 +112,11 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*type
}

// CheckTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) (*abcicli.ReqRes, error) {
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *abcicli.ReqRes); ok {
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
@@ -79,7 +125,7 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) (
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -88,13 +134,59 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 *types.RequestCheckTx) (
	return r0, r1
}

// Commit provides a mock function with given fields: _a0, _a1
func (_m *Client) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.ResponseCommit, error) {
// CheckTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseCommit
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) *types.ResponseCommit); ok {
	var r0 *types.ResponseCheckTx
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCheckTx)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CommitAsync provides a mock function with given fields: _a0
func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// CommitSync provides a mock function with given fields: _a0
func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseCommit
	if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseCommit)
@@ -102,7 +194,30 @@ func (_m *Client) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCommit) error); ok {
	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -111,13 +226,59 @@ func (_m *Client) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.
	return r0, r1
}

// Echo provides a mock function with given fields: _a0, _a1
func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) {
// DeliverTxSync provides a mock function with given fields: _a0, _a1
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseDeliverTx
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseDeliverTx)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EchoAsync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
	ret := _m.Called(ctx, msg)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok {
		r0 = rf(ctx, msg)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
		r1 = rf(ctx, msg)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EchoSync provides a mock function with given fields: ctx, msg
func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
	ret := _m.Called(ctx, msg)

	var r0 *types.ResponseEcho
	if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
		r0 = rf(_a0, _a1)
		r0 = rf(ctx, msg)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseEcho)
@@ -126,6 +287,52 @@ func (_m *Client) Echo(_a0 context.Context, _a1 string) (*types.ResponseEcho, er

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
		r1 = rf(ctx, msg)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EndBlockAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// EndBlockSync provides a mock function with given fields: _a0, _a1
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseEndBlock
	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseEndBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
@@ -148,22 +355,22 @@ func (_m *Client) Error() error {
	return r0
}

// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
	ret := _m.Called(_a0, _a1)
// FlushAsync provides a mock function with given fields: _a0
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0)

	var r0 *types.ResponseExtendVote
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
		r0 = rf(_a0, _a1)
	var r0 *abcicli.ReqRes
	if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
		r0 = rf(_a0)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseExtendVote)
			r0 = ret.Get(0).(*abcicli.ReqRes)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
		r1 = rf(_a0, _a1)
	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
		r1 = rf(_a0)
	} else {
		r1 = ret.Error(1)
	}
@@ -171,31 +378,8 @@ func (_m *Client) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote)
	return r0, r1
}

// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Client) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseFinalizeBlock
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
		r0 = rf(_a0, _a1)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
		}
	}

	var r1 error
	if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
		r1 = rf(_a0, _a1)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// Flush provides a mock function with given fields: _a0
func (_m *Client) Flush(_a0 context.Context) error {
// FlushSync provides a mock function with given fields: _a0
func (_m *Client) FlushSync(_a0 context.Context) error {
	ret := _m.Called(_a0)

	var r0 error
@@ -208,21 +392,21 @@ func (_m *Client) Flush(_a0 context.Context) error {
	return r0
}

// Info provides a mock function with given fields: _a0, _a1
func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
// InfoAsync provides a mock function with given fields: _a0, _a1
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
	ret := _m.Called(_a0, _a1)

	var r0 *types.ResponseInfo
	if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInfo)
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -231,12 +415,58 @@ func (_m *Client) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.Resp
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChain provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
// InfoSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInfo)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChainAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChainSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseInitChain
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -245,7 +475,7 @@ func (_m *Client) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -268,12 +498,35 @@ func (_m *Client) IsRunning() bool {
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListSnapshots provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ListSnapshotsSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseListSnapshots
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -282,7 +535,7 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -291,12 +544,35 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnaps
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseLoadSnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -305,7 +581,7 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -314,12 +590,35 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadS
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// OfferSnapshot provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// OfferSnapshotSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseOfferSnapshot
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -328,7 +627,7 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnap
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -370,21 +669,21 @@ func (_m *Client) OnStop() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
// PrepareProposal provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
// QueryAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponsePrepareProposal
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponsePrepareProposal)
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -393,35 +692,12 @@ func (_m *Client) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepare
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ProcessProposal provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseProcessProposal
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseProcessProposal)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// Query provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
// QuerySync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseQuery
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -430,7 +706,7 @@ func (_m *Client) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.Re
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -521,40 +797,7 @@ func (_m *Client) String() string {
|
||||
return r0
|
||||
}
|
||||
|
||||
// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseVerifyVoteExtension
|
||||
if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
type mockConstructorTestingTNewClient interface {
    mock.TestingT
    Cleanup(func())
}

// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewClient(t mockConstructorTestingTNewClient) *Client {
    mock := &Client{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
// Wait provides a mock function with given fields:
func (_m *Client) Wait() {
    _m.Called()
}
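The constructor above wires the mock into the test lifecycle, so expectations are asserted automatically during cleanup. A hedged sketch of that usage, reusing the imports from the previous example; the test name is illustrative:

func TestEchoWithConstructor(t *testing.T) {
    // NewClient registers t on the mock and asserts all expectations when the
    // test finishes, so no explicit AssertExpectations call is needed.
    m := mocks.NewClient(t)
    m.On("Echo", mock.Anything, "ping").Return(&types.ResponseEcho{Message: "ping"}, nil)

    res, err := m.Echo(context.Background(), "ping")
    require.NoError(t, err)
    require.Equal(t, "ping", res.Message)
}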
|
||||
|
||||
@@ -8,26 +8,31 @@ import (
    "fmt"
    "io"
    "net"
    "sync"
    "reflect"
    "time"

    "github.com/tendermint/tendermint/abci/types"
    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
    "github.com/tendermint/tendermint/internal/libs/timer"
    tmnet "github.com/tendermint/tendermint/libs/net"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/libs/timer"
)

const (
    reqQueueSize    = 256 // TODO make configurable
    flushThrottleMS = 20  // Don't wait longer than...
    // reqQueueSize is the max number of queued async requests.
    // (memory: 256MB max assuming 1MB transactions)
    reqQueueSize = 256
    // Don't wait longer than...
    flushThrottleMS = 20
)

// socketClient is the client side implementation of the Tendermint
// Socket Protocol (TSP). It is used by an instance of Tendermint to pass
// ABCI requests to an out of process application running the socketServer.
//
// This is goroutine-safe. All calls are serialized to the server through an unbuffered queue. The socketClient
// tracks responses and expects them to respect the order of the requests sent.
type reqResWithContext struct {
    R *ReqRes
    C context.Context // if context.Err is not nil, reqRes will be thrown away (ignored)
}

// This is goroutine-safe, but users should beware that the application in
// general is not meant to be interfaced with concurrent callers.
type socketClient struct {
    service.BaseService

@@ -35,10 +40,10 @@ type socketClient struct {
    mustConnect bool
    conn        net.Conn

    reqQueue   chan *ReqRes
    reqQueue   chan *reqResWithContext
    flushTimer *timer.ThrottleTimer

    mtx     sync.Mutex
    mtx     tmsync.RWMutex
    err     error
    reqSent *list.List                            // list of requests sent, waiting for response
    resCb   func(*types.Request, *types.Response) // called on all requests, if set.
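The reqResWithContext wrapper is what lets the send loop drop requests whose caller has already gone away. A condensed, hedged paraphrase of the check that sendRequestsRoutine performs further down; the helper name is illustrative, not part of this diff:

// Illustrative only: skip the write for requests whose context is already done.
func sendIfStillWanted(w io.Writer, rr *reqResWithContext) error {
    if err := rr.C.Err(); err != nil {
        // The caller canceled or timed out; the reqRes is simply thrown away.
        return nil
    }
    return types.WriteMessage(rr.R.Request, w)
}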
|
||||
@@ -48,10 +53,10 @@ var _ Client = (*socketClient)(nil)

// NewSocketClient creates a new socket client, which connects to a given
// address. If mustConnect is true, the client will return an error upon start
// if it fails to connect else it will continue to retry.
// if it fails to connect.
func NewSocketClient(addr string, mustConnect bool) Client {
    cli := &socketClient{
        reqQueue:    make(chan *ReqRes, reqQueueSize),
        reqQueue:    make(chan *reqResWithContext, reqQueueSize),
        flushTimer:  timer.NewThrottleTimer("socketClient", flushThrottleMS),
        mustConnect: mustConnect,
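For orientation, a hedged sketch of constructing and starting this client against an out-of-process application; the address and the logger setup are assumptions, not taken from this diff:

package main

import (
    "fmt"

    abcicli "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/libs/log"
)

func main() {
    logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)

    // mustConnect=false keeps retrying in the background instead of failing Start.
    client := abcicli.NewSocketClient("tcp://127.0.0.1:26658", false)
    client.SetLogger(logger.With("module", "abci-client"))

    if err := client.Start(); err != nil {
        panic(fmt.Errorf("error starting socket client: %w", err))
    }
    defer func() {
        if err := client.Stop(); err != nil {
            logger.Error("error stopping socket client", "err", err)
        }
    }()
}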
|
||||
|
||||
@@ -103,25 +108,19 @@ func (cli *socketClient) OnStop() {

// Error returns an error if the client was stopped abruptly.
func (cli *socketClient) Error() error {
    cli.mtx.Lock()
    defer cli.mtx.Unlock()
    cli.mtx.RLock()
    defer cli.mtx.RUnlock()
    return cli.err
}

//----------------------------------------

// SetResponseCallback sets a callback, which will be executed for each
// non-error & non-empty response from the server.
//
// NOTE: callback may get internally generated flush responses.
func (cli *socketClient) SetResponseCallback(resCb Callback) {
    cli.mtx.Lock()
    defer cli.mtx.Unlock()
    cli.resCb = resCb
    cli.mtx.Unlock()
}

func (cli *socketClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) {
    return cli.queueRequest(ctx, types.ToRequestCheckTx(req))
}

//----------------------------------------
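Since the callback may also see internally generated flush responses, a typical registration just observes and should not assume a particular response type. A hedged fragment, with client and logger as in the earlier sketch:

// Log every completed request/response pair, including flush responses.
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
    logger.Debug("abci response", "req", req.Value, "res", res.Value)
})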
|
||||
@@ -131,19 +130,22 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
|
||||
for {
|
||||
select {
|
||||
case reqres := <-cli.reqQueue:
|
||||
// N.B. We must enqueue before sending out the request, otherwise the
|
||||
// server may reply before we do it, and the receiver will fail for an
|
||||
// unsolicited reply.
|
||||
cli.trackRequest(reqres)
|
||||
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
|
||||
|
||||
err := types.WriteMessage(reqres.Request, w)
|
||||
if reqres.C.Err() != nil {
|
||||
cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
|
||||
continue
|
||||
}
|
||||
|
||||
cli.willSendReq(reqres.R)
|
||||
err := types.WriteMessage(reqres.R.Request, w)
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
// If it's a flush request, flush the current buffer.
|
||||
if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
|
||||
if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
|
||||
err = w.Flush()
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
|
||||
@@ -152,7 +154,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
|
||||
}
|
||||
case <-cli.flushTimer.Ch: // flush queue
|
||||
select {
|
||||
case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
|
||||
case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
|
||||
default:
|
||||
// Probably will fill the buffer, or retry later.
|
||||
}
|
||||
@@ -165,10 +167,6 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
|
||||
func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
|
||||
r := bufio.NewReader(conn)
|
||||
for {
|
||||
if !cli.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
var res = &types.Response{}
|
||||
err := types.ReadMessage(r, res)
|
||||
if err != nil {
|
||||
@@ -176,6 +174,8 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
|
||||
return
|
||||
}
|
||||
|
||||
// cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)
|
||||
|
||||
switch r := res.Value.(type) {
|
||||
case *types.Response_Exception: // app responded with error
|
||||
// XXX After setting cli.err, release waiters (e.g. reqres.Done())
|
||||
@@ -191,13 +191,7 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *socketClient) trackRequest(reqres *ReqRes) {
|
||||
// N.B. We must NOT hold the client state lock while checking this, or we
|
||||
// may deadlock with shutdown.
|
||||
if !cli.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
func (cli *socketClient) willSendReq(reqres *ReqRes) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
cli.reqSent.PushBack(reqres)
|
||||
@@ -210,12 +204,13 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
// Get the first ReqRes.
|
||||
next := cli.reqSent.Front()
|
||||
if next == nil {
|
||||
return fmt.Errorf("unexpected response %T when no call was made", res.Value)
|
||||
return fmt.Errorf("unexpected %v when nothing expected", reflect.TypeOf(res.Value))
|
||||
}
|
||||
|
||||
reqres := next.Value.(*ReqRes)
|
||||
if !resMatchesReq(reqres.Request, res) {
|
||||
return fmt.Errorf("unexpected response %T to the request %T", res.Value, reqres.Request.Value)
|
||||
return fmt.Errorf("unexpected %v when response to %v expected",
|
||||
reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
|
||||
}
|
||||
|
||||
reqres.Response = res
|
||||
@@ -238,188 +233,263 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) Flush(ctx context.Context) error {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush())
|
||||
func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
|
||||
}
|
||||
|
||||
func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestFlush())
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestCommit())
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) FlushSync(ctx context.Context) error {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
|
||||
if err != nil {
|
||||
return queueErr(err)
|
||||
}
|
||||
|
||||
if err := cli.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
reqRes.Wait()
|
||||
return nil
|
||||
|
||||
gotResp := make(chan struct{})
|
||||
go func() {
|
||||
// NOTE: if we don't flush the queue, it's possible to get stuck here
|
||||
reqRes.Wait()
|
||||
close(gotResp)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-gotResp:
|
||||
return cli.Error()
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestEcho(msg))
|
||||
func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetEcho(), cli.Error()
|
||||
return reqres.Response.GetEcho(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Info(ctx context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestInfo(req))
|
||||
func (cli *socketClient) InfoSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInfo,
|
||||
) (*types.ResponseInfo, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetInfo(), cli.Error()
|
||||
return reqres.Response.GetInfo(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTx(ctx context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestCheckTx(req))
|
||||
func (cli *socketClient) DeliverTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestDeliverTx,
|
||||
) (*types.ResponseDeliverTx, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetCheckTx(), cli.Error()
|
||||
return reqres.Response.GetDeliverTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Query(ctx context.Context, req *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestQuery(req))
|
||||
func (cli *socketClient) CheckTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetQuery(), cli.Error()
|
||||
return reqres.Response.GetCheckTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) Commit(ctx context.Context, req *types.RequestCommit) (*types.ResponseCommit, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestCommit())
|
||||
func (cli *socketClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetCommit(), cli.Error()
|
||||
return reqres.Response.GetQuery(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChain(ctx context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestInitChain(req))
|
||||
func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetInitChain(), cli.Error()
|
||||
return reqres.Response.GetCommit(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestListSnapshots(req))
|
||||
func (cli *socketClient) InitChainSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetListSnapshots(), cli.Error()
|
||||
return reqres.Response.GetInitChain(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestOfferSnapshot(req))
|
||||
func (cli *socketClient) BeginBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestBeginBlock,
|
||||
) (*types.ResponseBeginBlock, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetOfferSnapshot(), cli.Error()
|
||||
return reqres.Response.GetBeginBlock(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
func (cli *socketClient) EndBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestEndBlock,
|
||||
) (*types.ResponseEndBlock, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetLoadSnapshotChunk(), cli.Error()
|
||||
return reqres.Response.GetEndBlock(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
func (cli *socketClient) ListSnapshotsSync(
|
||||
ctx context.Context,
|
||||
req types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetApplySnapshotChunk(), cli.Error()
|
||||
return reqres.Response.GetListSnapshots(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) PrepareProposal(ctx context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestPrepareProposal(req))
|
||||
func (cli *socketClient) OfferSnapshotSync(
|
||||
ctx context.Context,
|
||||
req types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetPrepareProposal(), cli.Error()
|
||||
return reqres.Response.GetOfferSnapshot(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestProcessProposal(req))
|
||||
func (cli *socketClient) LoadSnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetProcessProposal(), cli.Error()
|
||||
return reqres.Response.GetLoadSnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ExtendVote(ctx context.Context, req *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestExtendVote(req))
|
||||
func (cli *socketClient) ApplySnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetExtendVote(), cli.Error()
|
||||
return reqres.Response.GetApplySnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) VerifyVoteExtension(ctx context.Context, req *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestVerifyVoteExtension(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetVerifyVoteExtension(), cli.Error()
|
||||
}
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) FinalizeBlock(ctx context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestFinalizeBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := cli.Flush(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqRes.Response.GetFinalizeBlock(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request) (*ReqRes, error) {
// queueRequest enqueues req onto the queue. If the queue is full, it either
// returns an error (sync=false) or blocks (sync=true).
//
// When sync=true, ctx can be used to break early. When sync=false, ctx will be
// used later to determine if the request should be dropped (if ctx.Err is
// non-nil).
//
// The caller is responsible for checking cli.Error.
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) {
    reqres := NewReqRes(req)

    // TODO: set cli.err if reqQueue times out
    select {
    case cli.reqQueue <- reqres:
    case <-ctx.Done():
        return nil, ctx.Err()
    if sync {
        select {
        case cli.reqQueue <- &reqResWithContext{R: reqres, C: context.Background()}:
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    } else {
        select {
        case cli.reqQueue <- &reqResWithContext{R: reqres, C: ctx}:
        default:
            return nil, errors.New("buffer is full")
        }
    }
|
||||
|
||||
// Maybe auto-flush, or unset auto-flush
|
||||
@@ -433,8 +503,40 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request) (
|
||||
return reqres, nil
|
||||
}
|
||||
|
||||
// flushQueue marks as complete and discards all remaining pending requests
|
||||
// from the queue.
|
||||
func (cli *socketClient) queueRequestAsync(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, false)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) queueRequestAndFlushSync(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, true)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
if err := cli.FlushSync(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func queueErr(e error) error {
|
||||
return fmt.Errorf("can't queue req: %w", e)
|
||||
}
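Together these helpers define the two calling conventions used throughout this diff: the *Async methods enqueue and hand back a ReqRes, while the *Sync methods also flush and wait. A hedged sketch of exercising both paths; the surrounding function, its arguments, and the imports (context, fmt, abcicli, types) are illustrative:

func checkBothWays(ctx context.Context, client abcicli.Client, tx []byte) error {
    // Async: returns immediately with a handle; ctx is only consulted when the
    // send loop later dequeues the request.
    reqres, err := client.CheckTxAsync(ctx, types.RequestCheckTx{Tx: tx})
    if err != nil {
        return err
    }
    reqres.Wait() // block until the response (or a client error) arrives

    // Sync: enqueue, flush the wire, and wait before returning the typed response.
    res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: tx})
    if err != nil {
        return err
    }
    if res.Code != 0 {
        return fmt.Errorf("check tx failed with code %d", res.Code)
    }
    return nil
}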
|
||||
|
||||
func (cli *socketClient) flushQueue() {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
@@ -450,7 +552,7 @@ LOOP:
|
||||
for {
|
||||
select {
|
||||
case reqres := <-cli.reqQueue:
|
||||
reqres.Done()
|
||||
reqres.R.Done()
|
||||
default:
|
||||
break LOOP
|
||||
}
|
||||
@@ -467,6 +569,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
||||
_, ok = res.Value.(*types.Response_Flush)
|
||||
case *types.Request_Info:
|
||||
_, ok = res.Value.(*types.Response_Info)
|
||||
case *types.Request_DeliverTx:
|
||||
_, ok = res.Value.(*types.Response_DeliverTx)
|
||||
case *types.Request_CheckTx:
|
||||
_, ok = res.Value.(*types.Response_CheckTx)
|
||||
case *types.Request_Commit:
|
||||
@@ -475,6 +579,10 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
||||
_, ok = res.Value.(*types.Response_Query)
|
||||
case *types.Request_InitChain:
|
||||
_, ok = res.Value.(*types.Response_InitChain)
|
||||
case *types.Request_BeginBlock:
|
||||
_, ok = res.Value.(*types.Response_BeginBlock)
|
||||
case *types.Request_EndBlock:
|
||||
_, ok = res.Value.(*types.Response_EndBlock)
|
||||
case *types.Request_ApplySnapshotChunk:
|
||||
_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
|
||||
case *types.Request_LoadSnapshotChunk:
|
||||
@@ -483,16 +591,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
||||
_, ok = res.Value.(*types.Response_ListSnapshots)
|
||||
case *types.Request_OfferSnapshot:
|
||||
_, ok = res.Value.(*types.Response_OfferSnapshot)
|
||||
case *types.Request_ExtendVote:
|
||||
_, ok = res.Value.(*types.Response_ExtendVote)
|
||||
case *types.Request_VerifyVoteExtension:
|
||||
_, ok = res.Value.(*types.Response_VerifyVoteExtension)
|
||||
case *types.Request_PrepareProposal:
|
||||
_, ok = res.Value.(*types.Response_PrepareProposal)
|
||||
case *types.Request_ProcessProposal:
|
||||
_, ok = res.Value.(*types.Response_ProcessProposal)
|
||||
case *types.Request_FinalizeBlock:
|
||||
_, ok = res.Value.(*types.Response_FinalizeBlock)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
@@ -503,12 +601,10 @@ func (cli *socketClient) stopForError(err error) {
|
||||
}
|
||||
|
||||
cli.mtx.Lock()
|
||||
if cli.err == nil {
|
||||
cli.err = err
|
||||
}
|
||||
cli.err = err
|
||||
cli.mtx.Unlock()
|
||||
|
||||
cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
|
||||
cli.Logger.Info("Stopping abci.socketClient", "reason", err)
|
||||
if err := cli.Stop(); err != nil {
|
||||
cli.Logger.Error("Error stopping abci.socketClient", "err", err)
|
||||
}
|
||||
|
||||
@@ -3,34 +3,46 @@ package abcicli_test
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"math/rand"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
func TestCalls(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
app := types.BaseApplication{}
|
||||
var ctx = context.Background()
|
||||
|
||||
_, c := setupClientServer(t, app)
|
||||
func TestProperSyncCalls(t *testing.T) {
|
||||
app := slowApp{}
|
||||
|
||||
s, c := setupClientServer(t, app)
|
||||
t.Cleanup(func() {
|
||||
if err := s.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := c.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
res, err := c.Echo(ctx, "hello")
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, res)
|
||||
// This is BeginBlockSync unrolled....
|
||||
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
|
||||
assert.NoError(t, err)
|
||||
err = c.FlushSync(context.Background())
|
||||
assert.NoError(t, err)
|
||||
res := reqres.Response.GetBeginBlock()
|
||||
assert.NotNil(t, res)
|
||||
resp <- c.Error()
|
||||
}()
|
||||
|
||||
@@ -43,25 +55,38 @@ func TestCalls(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestHangingAsyncCalls(t *testing.T) {
|
||||
func TestHangingSyncCalls(t *testing.T) {
|
||||
app := slowApp{}
|
||||
|
||||
s, c := setupClientServer(t, app)
|
||||
t.Cleanup(func() {
|
||||
if err := s.Stop(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := c.Stop(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
})
|
||||
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
// Call CheckTx
|
||||
reqres, err := c.CheckTxAsync(context.Background(), &types.RequestCheckTx{})
|
||||
require.NoError(t, err)
|
||||
// wait 50 ms for all events to travel socket, but
|
||||
// Start BeginBlock and flush it
|
||||
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
|
||||
assert.NoError(t, err)
|
||||
flush, err := c.FlushAsync(ctx)
|
||||
assert.NoError(t, err)
|
||||
// wait 20 ms for all events to travel socket, but
|
||||
// no response yet from server
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
// kill the server, so the connections break
|
||||
err = s.Stop()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// wait for the response from CheckTx
|
||||
// wait for the response from BeginBlock
|
||||
reqres.Wait()
|
||||
flush.Wait()
|
||||
resp <- c.Error()
|
||||
}()
|
||||
|
||||
@@ -74,81 +99,21 @@ func TestHangingAsyncCalls(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBulk(t *testing.T) {
|
||||
const numTxs = 700000
|
||||
// use a socket instead of a port
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
app := types.NewBaseApplication()
|
||||
// Start the listener
|
||||
server := server.NewSocketServer(socket, app)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
})
|
||||
err := server.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Connect to the socket
|
||||
client := abcicli.NewSocketClient(socket, false)
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
})
|
||||
|
||||
err = client.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Construct request
|
||||
rfb := &types.RequestFinalizeBlock{Txs: make([][]byte, numTxs)}
|
||||
for counter := 0; counter < numTxs; counter++ {
|
||||
rfb.Txs[counter] = []byte("test")
|
||||
}
|
||||
// Send bulk request
|
||||
res, err := client.FinalizeBlock(context.Background(), rfb)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, numTxs, len(res.TxResults), "Number of txs doesn't match")
|
||||
for _, tx := range res.TxResults {
|
||||
require.Equal(t, uint32(0), tx.Code, "Tx failed")
|
||||
}
|
||||
|
||||
// Send final flush message
|
||||
err = client.Flush(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func setupClientServer(t *testing.T, app types.Application) (
|
||||
service.Service, abcicli.Client) {
|
||||
t.Helper()
|
||||
|
||||
// some port between 20k and 30k
|
||||
port := 20000 + tmrand.Int32()%10000
|
||||
port := 20000 + rand.Int31()%10000
|
||||
addr := fmt.Sprintf("localhost:%d", port)
|
||||
|
||||
s := server.NewSocketServer(addr, app)
|
||||
err := s.Start()
|
||||
s, err := server.NewServer(addr, "socket", app)
|
||||
require.NoError(t, err)
|
||||
err = s.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := s.Stop(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
})
|
||||
|
||||
c := abcicli.NewSocketClient(addr, true)
|
||||
err = c.Start()
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := c.Stop(); err != nil {
|
||||
t.Log(err)
|
||||
}
|
||||
})
|
||||
|
||||
return s, c
|
||||
}
|
||||
|
||||
@@ -156,83 +121,7 @@ type slowApp struct {
|
||||
types.BaseApplication
|
||||
}
|
||||
|
||||
func (slowApp) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
time.Sleep(time.Second)
|
||||
return &types.ResponseCheckTx{}, nil
|
||||
}
|
||||
|
||||
// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
|
||||
// set after the client completes the call into the app. Currently this
|
||||
// test relies on the callback being allowed to be invoked twice if set multiple
|
||||
// times, once when set early and once when set late.
|
||||
func TestCallbackInvokedWhenSetLate(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
app := blockedABCIApplication{
|
||||
wg: wg,
|
||||
}
|
||||
_, c := setupClientServer(t, app)
|
||||
reqRes, err := c.CheckTxAsync(ctx, &types.RequestCheckTx{})
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
cb := func(_ *types.Response) {
|
||||
close(done)
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
app.wg.Done()
|
||||
<-done
|
||||
|
||||
var called bool
|
||||
cb = func(_ *types.Response) {
|
||||
called = true
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
require.True(t, called)
|
||||
}
|
||||
|
||||
type blockedABCIApplication struct {
|
||||
wg *sync.WaitGroup
|
||||
types.BaseApplication
|
||||
}
|
||||
|
||||
func (b blockedABCIApplication) CheckTxAsync(ctx context.Context, r *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
b.wg.Wait()
|
||||
return b.BaseApplication.CheckTx(ctx, r)
|
||||
}
|
||||
|
||||
// TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
|
||||
// set before the client completes the call into the app.
|
||||
func TestCallbackInvokedWhenSetEarly(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
wg := &sync.WaitGroup{}
|
||||
wg.Add(1)
|
||||
app := blockedABCIApplication{
|
||||
wg: wg,
|
||||
}
|
||||
_, c := setupClientServer(t, app)
|
||||
reqRes, err := c.CheckTxAsync(ctx, &types.RequestCheckTx{})
|
||||
require.NoError(t, err)
|
||||
|
||||
done := make(chan struct{})
|
||||
cb := func(_ *types.Response) {
|
||||
close(done)
|
||||
}
|
||||
reqRes.SetCallback(cb)
|
||||
app.wg.Done()
|
||||
|
||||
called := func() bool {
|
||||
select {
|
||||
case <-done:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
require.Eventually(t, called, time.Second, time.Millisecond*25)
|
||||
func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
return types.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
@@ -1,75 +0,0 @@
package abcicli

import (
    "context"
    "sync"

    types "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/libs/service"
)

type unsyncLocalClient struct {
    service.BaseService

    types.Application

    // This mutex is exclusively used to protect the callback.
    mtx sync.RWMutex
    Callback
}

var _ Client = (*unsyncLocalClient)(nil)

// NewUnsyncLocalClient creates an unsynchronized local client, which will be
// directly calling the methods of the given app.
//
// Unlike NewLocalClient, it does not hold a mutex around the application, so
// it is up to the application to manage its synchronization properly.
func NewUnsyncLocalClient(app types.Application) Client {
    cli := &unsyncLocalClient{
        Application: app,
    }
    cli.BaseService = *service.NewBaseService(nil, "unsyncLocalClient", cli)
    return cli
}
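For context, this file only exists on one side of the comparison; the local client runs the application in-process instead of over a socket. A hedged fragment showing how it might be wired up, using the BaseApplication that also appears in the tests above; error handling and surrounding imports are trimmed and assumed:

// Run the ABCI application in-process; the unsynchronized variant leaves
// locking entirely to the application.
app := types.NewBaseApplication()
client := abcicli.NewUnsyncLocalClient(app)
if err := client.Start(); err != nil {
    panic(err)
}

res, err := client.Echo(context.Background(), "ping")
if err != nil {
    panic(err)
}
fmt.Println(res.Message) // "ping"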
|
||||
|
||||
// TODO: change types.Application to include Error()?
|
||||
func (app *unsyncLocalClient) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) Flush(_ context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{Message: msg}, nil
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *unsyncLocalClient) SetResponseCallback(cb Callback) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
app.Callback = cb
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) CheckTxAsync(ctx context.Context, req *types.RequestCheckTx) (*ReqRes, error) {
|
||||
res, err := app.Application.CheckTx(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return app.callback(
|
||||
types.ToRequestCheckTx(req),
|
||||
types.ToResponseCheckTx(res),
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *unsyncLocalClient) callback(req *types.Request, res *types.Response) *ReqRes {
|
||||
app.mtx.RLock()
|
||||
defer app.mtx.RUnlock()
|
||||
app.Callback(req, res)
|
||||
rr := newLocalReqRes(req, res)
|
||||
rr.callbackInvoked = true
|
||||
return rr
|
||||
}
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -15,6 +16,7 @@ import (
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
"github.com/tendermint/tendermint/abci/server"
|
||||
servertest "github.com/tendermint/tendermint/abci/tests/server"
|
||||
@@ -27,6 +29,8 @@ import (
|
||||
var (
|
||||
client abcicli.Client
|
||||
logger log.Logger
|
||||
|
||||
ctx = context.Background()
|
||||
)
|
||||
|
||||
// flags
|
||||
@@ -53,17 +57,14 @@ var RootCmd = &cobra.Command{
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
switch cmd.Use {
|
||||
case "kvstore", "version", "help [command]":
|
||||
case "kvstore", "version":
|
||||
return nil
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
allowLevel, err := log.AllowLevel(flagLogLevel)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
}
|
||||
|
||||
if client == nil {
|
||||
var err error
|
||||
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
|
||||
@@ -82,11 +83,10 @@ var RootCmd = &cobra.Command{
|
||||
// Structure for data passed to print response.
|
||||
type response struct {
|
||||
// generic abci response
|
||||
Data []byte
|
||||
Code uint32
|
||||
Info string
|
||||
Log string
|
||||
Status int32
|
||||
Data []byte
|
||||
Code uint32
|
||||
Info string
|
||||
Log string
|
||||
|
||||
Query *queryResponse
|
||||
}
|
||||
@@ -138,15 +138,13 @@ func addCommands() {
|
||||
RootCmd.AddCommand(consoleCmd)
|
||||
RootCmd.AddCommand(echoCmd)
|
||||
RootCmd.AddCommand(infoCmd)
|
||||
RootCmd.AddCommand(deliverTxCmd)
|
||||
RootCmd.AddCommand(checkTxCmd)
|
||||
RootCmd.AddCommand(commitCmd)
|
||||
RootCmd.AddCommand(versionCmd)
|
||||
RootCmd.AddCommand(testCmd)
|
||||
RootCmd.AddCommand(prepareProposalCmd)
|
||||
RootCmd.AddCommand(processProposalCmd)
|
||||
addQueryFlags()
|
||||
RootCmd.AddCommand(queryCmd)
|
||||
RootCmd.AddCommand(finalizeBlockCmd)
|
||||
|
||||
// examples
|
||||
addKVStoreFlags()
|
||||
@@ -167,9 +165,10 @@ where example.file looks something like:
|
||||
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
finalize_block 0x00
|
||||
deliver_tx 0x00
|
||||
check_tx 0x00
|
||||
finalize_block 0x01 0x04 0xff
|
||||
deliver_tx 0x01
|
||||
deliver_tx 0x04
|
||||
info
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
@@ -185,7 +184,7 @@ This command opens an interactive console for running any of the other commands
|
||||
without opening a new connection each time
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
ValidArgs: []string{"echo", "info", "finalize_block", "check_tx", "prepare_proposal", "process_proposal", "commit", "query"},
|
||||
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
|
||||
RunE: cmdConsole,
|
||||
}
|
||||
|
||||
@@ -204,12 +203,12 @@ var infoCmd = &cobra.Command{
|
||||
RunE: cmdInfo,
|
||||
}
|
||||
|
||||
var finalizeBlockCmd = &cobra.Command{
|
||||
Use: "finalize_block",
|
||||
Short: "deliver a block of transactions to the application",
|
||||
Long: "deliver a block of transactions to the application",
|
||||
Args: cobra.MinimumNArgs(1),
|
||||
RunE: cmdFinalizeBlock,
|
||||
var deliverTxCmd = &cobra.Command{
|
||||
Use: "deliver_tx",
|
||||
Short: "deliver a new transaction to the application",
|
||||
Long: "deliver a new transaction to the application",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: cmdDeliverTx,
|
||||
}
|
||||
|
||||
var checkTxCmd = &cobra.Command{
|
||||
@@ -239,22 +238,6 @@ var versionCmd = &cobra.Command{
|
||||
},
|
||||
}
|
||||
|
||||
var prepareProposalCmd = &cobra.Command{
|
||||
Use: "prepare_proposal",
|
||||
Short: "prepare proposal",
|
||||
Long: "prepare proposal",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
RunE: cmdPrepareProposal,
|
||||
}
|
||||
|
||||
var processProposalCmd = &cobra.Command{
|
||||
Use: "process_proposal",
|
||||
Short: "process proposal",
|
||||
Long: "process proposal",
|
||||
Args: cobra.MinimumNArgs(0),
|
||||
RunE: cmdProcessProposal,
|
||||
}
|
||||
|
||||
var queryCmd = &cobra.Command{
|
||||
Use: "query",
|
||||
Short: "query the application state",
|
||||
@@ -309,55 +292,23 @@ func compose(fs []func() error) error {
|
||||
}
|
||||
|
||||
func cmdTest(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
return compose(
|
||||
[]func() error{
|
||||
func() error { return servertest.InitChain(ctx, client) },
|
||||
func() error { return servertest.Commit(ctx, client) },
|
||||
func() error { return servertest.InitChain(client) },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeOK, nil) },
|
||||
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1}) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x01}, code.CodeTypeOK, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x02}, code.CodeTypeOK, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x03}, code.CodeTypeOK, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x04}, code.CodeTypeOK, nil) },
|
||||
func() error {
|
||||
return servertest.FinalizeBlock(ctx, client, [][]byte{
|
||||
[]byte("abc"),
|
||||
}, []uint32{
|
||||
kvstore.CodeTypeInvalidTxFormat,
|
||||
}, nil, nil)
|
||||
},
|
||||
func() error { return servertest.Commit(ctx, client) },
|
||||
func() error {
|
||||
return servertest.FinalizeBlock(ctx, client, [][]byte{
|
||||
{0x00},
|
||||
}, []uint32{
|
||||
kvstore.CodeTypeOK,
|
||||
}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
},
|
||||
func() error { return servertest.Commit(ctx, client) },
|
||||
func() error {
|
||||
return servertest.FinalizeBlock(ctx, client, [][]byte{
|
||||
{0x00},
|
||||
{0x01},
|
||||
{0x00, 0x02},
|
||||
{0x00, 0x03},
|
||||
{0x00, 0x00, 0x04},
|
||||
{0x00, 0x00, 0x06},
|
||||
}, []uint32{
|
||||
kvstore.CodeTypeInvalidTxFormat,
|
||||
kvstore.CodeTypeOK,
|
||||
kvstore.CodeTypeOK,
|
||||
kvstore.CodeTypeOK,
|
||||
kvstore.CodeTypeOK,
|
||||
kvstore.CodeTypeInvalidTxFormat,
|
||||
}, nil, []byte{0, 0, 0, 0, 0, 0, 0, 5})
|
||||
},
|
||||
func() error { return servertest.Commit(ctx, client) },
|
||||
func() error {
|
||||
return servertest.PrepareProposal(ctx, client, [][]byte{
|
||||
{0x01},
|
||||
}, [][]byte{{0x01}}, nil)
|
||||
},
|
||||
func() error {
|
||||
return servertest.ProcessProposal(ctx, client, [][]byte{
|
||||
{0x01},
|
||||
}, types.ResponseProcessProposal_ACCEPT)
|
||||
return servertest.DeliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
},
|
||||
func() error { return servertest.Commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5}) },
|
||||
})
|
||||
}
|
||||
|
||||
@@ -450,18 +401,14 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
return cmdCheckTx(cmd, actualArgs)
|
||||
case "commit":
|
||||
return cmdCommit(cmd, actualArgs)
|
||||
case "finalize_block":
|
||||
return cmdFinalizeBlock(cmd, actualArgs)
|
||||
case "deliver_tx":
|
||||
return cmdDeliverTx(cmd, actualArgs)
|
||||
case "echo":
|
||||
return cmdEcho(cmd, actualArgs)
|
||||
case "info":
|
||||
return cmdInfo(cmd, actualArgs)
|
||||
case "query":
|
||||
return cmdQuery(cmd, actualArgs)
|
||||
case "prepare_proposal":
|
||||
return cmdPrepareProposal(cmd, actualArgs)
|
||||
case "process_proposal":
|
||||
return cmdProcessProposal(cmd, actualArgs)
|
||||
default:
|
||||
return cmdUnimplemented(cmd, pArgs)
|
||||
}
|
||||
@@ -482,7 +429,7 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s: %s\n", echoCmd.Use, echoCmd.Short)
|
||||
fmt.Printf("%s: %s\n", infoCmd.Use, infoCmd.Short)
|
||||
fmt.Printf("%s: %s\n", checkTxCmd.Use, checkTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", finalizeBlockCmd.Use, finalizeBlockCmd.Short)
|
||||
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
|
||||
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
|
||||
fmt.Println("Use \"[command] --help\" for more information about a command.")
|
||||
@@ -496,15 +443,13 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
msg = args[0]
|
||||
}
|
||||
res, err := client.Echo(cmd.Context(), msg)
|
||||
res, err := client.EchoSync(ctx, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printResponse(cmd, args, response{
|
||||
Data: []byte(res.Message),
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -514,7 +459,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 1 {
|
||||
version = args[0]
|
||||
}
|
||||
res, err := client.Info(cmd.Context(), &types.RequestInfo{Version: version})
|
||||
res, err := client.InfoSync(ctx, types.RequestInfo{Version: version})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -526,40 +471,29 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
|
||||
const codeBad uint32 = 10
|
||||
|
||||
// Append new txs to application
|
||||
func cmdFinalizeBlock(cmd *cobra.Command, args []string) error {
|
||||
// Append a new tx to application
|
||||
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
printResponse(cmd, args, response{
|
||||
Code: codeBad,
|
||||
Log: "Must provide at least one transaction",
|
||||
Log: "want the tx",
|
||||
})
|
||||
return nil
|
||||
}
|
||||
txs := make([][]byte, len(args))
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txs[i] = txBytes
|
||||
}
|
||||
res, err := client.FinalizeBlock(cmd.Context(), &types.RequestFinalizeBlock{Txs: txs})
|
||||
txBytes, err := stringOrHexToBytes(args[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resps := make([]response, 0, len(res.TxResults)+1)
|
||||
for _, tx := range res.TxResults {
|
||||
resps = append(resps, response{
|
||||
Code: tx.Code,
|
||||
Data: tx.Data,
|
||||
Info: tx.Info,
|
||||
Log: tx.Log,
|
||||
})
|
||||
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resps = append(resps, response{
|
||||
Data: res.AgreedAppData,
|
||||
printResponse(cmd, args, response{
|
||||
Code: res.Code,
|
||||
Data: res.Data,
|
||||
Info: res.Info,
|
||||
Log: res.Log,
|
||||
})
|
||||
printResponse(cmd, args, resps...)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -576,7 +510,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.CheckTx(cmd.Context(), &types.RequestCheckTx{Tx: txBytes})
|
||||
res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -591,11 +525,13 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Get application Merkle root hash
|
||||
func cmdCommit(cmd *cobra.Command, args []string) error {
|
||||
_, err := client.Commit(cmd.Context(), &types.RequestCommit{})
|
||||
res, err := client.CommitSync(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
printResponse(cmd, args, response{})
|
||||
printResponse(cmd, args, response{
|
||||
Data: res.Data,
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -614,7 +550,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
resQuery, err := client.Query(cmd.Context(), &types.RequestQuery{
|
||||
resQuery, err := client.QuerySync(ctx, types.RequestQuery{
|
||||
Data: queryBytes,
|
||||
Path: flagPath,
|
||||
Height: int64(flagHeight),
|
||||
@@ -637,74 +573,17 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdPrepareProposal(cmd *cobra.Command, args []string) error {
|
||||
txsBytesArray := make([][]byte, len(args))
|
||||
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txsBytesArray[i] = txBytes
|
||||
}
|
||||
|
||||
res, err := client.PrepareProposal(cmd.Context(), &types.RequestPrepareProposal{
|
||||
Txs: txsBytesArray,
|
||||
// kvstore has to have this parameter in order not to reject a tx as the default value is 0
|
||||
MaxTxBytes: 65536,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resps := make([]response, 0, len(res.Txs))
|
||||
for _, tx := range res.Txs {
|
||||
resps = append(resps, response{
|
||||
Code: 0, // CodeOK
|
||||
Log: "Succeeded. Tx: " + string(tx),
|
||||
})
|
||||
}
|
||||
|
||||
printResponse(cmd, args, resps...)
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdProcessProposal(cmd *cobra.Command, args []string) error {
|
||||
txsBytesArray := make([][]byte, len(args))
|
||||
|
||||
for i, arg := range args {
|
||||
txBytes, err := stringOrHexToBytes(arg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
txsBytesArray[i] = txBytes
|
||||
}
|
||||
|
||||
res, err := client.ProcessProposal(cmd.Context(), &types.RequestProcessProposal{
|
||||
Txs: txsBytesArray,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
printResponse(cmd, args, response{
|
||||
Status: int32(res.Status),
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
|
||||
// Create the application - in memory or persisted to disk
|
||||
var app types.Application
|
||||
if flagPersist == "" {
|
||||
var err error
|
||||
flagPersist, err = os.MkdirTemp("", "persistent_kvstore_tmp")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
app = kvstore.NewApplication()
|
||||
} else {
|
||||
app = kvstore.NewPersistentKVStoreApplication(flagPersist)
|
||||
app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
|
||||
}
|
||||
app = kvstore.NewPersistentApplication(flagPersist)
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
@@ -730,49 +609,44 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
|
||||
//--------------------------------------------------------------------------------
|
||||
|
||||
func printResponse(cmd *cobra.Command, args []string, rsps ...response) {
|
||||
func printResponse(cmd *cobra.Command, args []string, rsp response) {
|
||||
|
||||
if flagVerbose {
|
||||
fmt.Println(">", cmd.Use, strings.Join(args, " "))
|
||||
}
|
||||
|
||||
for _, rsp := range rsps {
|
||||
// Always print the status code.
|
||||
if rsp.Code == types.CodeTypeOK {
|
||||
fmt.Printf("-> code: OK\n")
|
||||
} else {
|
||||
fmt.Printf("-> code: %d\n", rsp.Code)
|
||||
// Always print the status code.
|
||||
if rsp.Code == types.CodeTypeOK {
|
||||
fmt.Printf("-> code: OK\n")
|
||||
} else {
|
||||
fmt.Printf("-> code: %d\n", rsp.Code)
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
if len(rsp.Data) != 0 {
|
||||
// Do not print this line when using the finalize_block command
|
||||
// because the string comes out as gibberish
|
||||
if cmd.Use != "finalize_block" {
|
||||
fmt.Printf("-> data: %s\n", rsp.Data)
|
||||
}
|
||||
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
|
||||
}
|
||||
if rsp.Log != "" {
|
||||
fmt.Printf("-> log: %s\n", rsp.Log)
|
||||
}
|
||||
if cmd.Use == "process_proposal" {
|
||||
fmt.Printf("-> status: %s\n", types.ResponseProcessProposal_ProposalStatus_name[rsp.Status])
|
||||
if len(rsp.Data) != 0 {
|
||||
// Do not print this line when using the commit command
|
||||
// because the string comes out as gibberish
|
||||
if cmd.Use != "commit" {
|
||||
fmt.Printf("-> data: %s\n", rsp.Data)
|
||||
}
|
||||
fmt.Printf("-> data.hex: 0x%X\n", rsp.Data)
|
||||
}
|
||||
if rsp.Log != "" {
|
||||
fmt.Printf("-> log: %s\n", rsp.Log)
|
||||
}
|
||||
|
||||
if rsp.Query != nil {
|
||||
fmt.Printf("-> height: %d\n", rsp.Query.Height)
|
||||
if rsp.Query.Key != nil {
|
||||
fmt.Printf("-> key: %s\n", rsp.Query.Key)
|
||||
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
|
||||
}
|
||||
if rsp.Query.Value != nil {
|
||||
fmt.Printf("-> value: %s\n", rsp.Query.Value)
|
||||
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
|
||||
}
|
||||
if rsp.Query.ProofOps != nil {
|
||||
fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
|
||||
}
|
||||
if rsp.Query != nil {
|
||||
fmt.Printf("-> height: %d\n", rsp.Query.Height)
|
||||
if rsp.Query.Key != nil {
|
||||
fmt.Printf("-> key: %s\n", rsp.Query.Key)
|
||||
fmt.Printf("-> key.hex: %X\n", rsp.Query.Key)
|
||||
}
|
||||
if rsp.Query.Value != nil {
|
||||
fmt.Printf("-> value: %s\n", rsp.Query.Value)
|
||||
fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
|
||||
}
|
||||
if rsp.Query.ProofOps != nil {
|
||||
fmt.Printf("-> proof: %#v\n", rsp.Query.ProofOps)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
abci/example/code/code.go (new file, 10 lines)
@@ -0,0 +1,10 @@
package code

// Return codes for the examples
const (
	CodeTypeOK            uint32 = 0
	CodeTypeEncodingError uint32 = 1
	CodeTypeBadNonce      uint32 = 2
	CodeTypeUnauthorized  uint32 = 3
	CodeTypeUnknownError  uint32 = 4
)
abci/example/example.go (new file, 3 lines)
@@ -0,0 +1,3 @@
package example

// so the go tool doesn't return errors about no buildable go files ...
abci/example/example_test.go (new file, 185 lines)
@@ -0,0 +1,185 @@
|
||||
package example
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func TestKVStore(t *testing.T) {
|
||||
fmt.Println("### Testing KVStore")
|
||||
testStream(t, kvstore.NewApplication())
|
||||
}
|
||||
|
||||
func TestBaseApp(t *testing.T) {
|
||||
fmt.Println("### Testing BaseApp")
|
||||
testStream(t, types.NewBaseApplication())
|
||||
}
|
||||
|
||||
func TestGRPC(t *testing.T) {
|
||||
fmt.Println("### Testing GRPC")
|
||||
testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication()))
|
||||
}
|
||||
|
||||
func testStream(t *testing.T, app types.Application) {
|
||||
const numDeliverTxs = 20000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
|
||||
// Start the listener
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
err := server.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Connect to the socket
|
||||
client := abcicli.NewSocketClient(socket, false)
|
||||
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
|
||||
err = client.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
done := make(chan struct{})
|
||||
counter := 0
|
||||
client.SetResponseCallback(func(req *types.Request, res *types.Response) {
|
||||
// Process response
|
||||
switch r := res.Value.(type) {
|
||||
case *types.Response_DeliverTx:
|
||||
counter++
|
||||
if r.DeliverTx.Code != code.CodeTypeOK {
|
||||
t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
|
||||
}
|
||||
if counter > numDeliverTxs {
|
||||
t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
|
||||
}
|
||||
if counter == numDeliverTxs {
|
||||
go func() {
|
||||
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
||||
close(done)
|
||||
}()
|
||||
return
|
||||
}
|
||||
case *types.Response_Flush:
|
||||
// ignore
|
||||
default:
|
||||
t.Error("Unexpected response type", reflect.TypeOf(res.Value))
|
||||
}
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Write requests
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
// Send request
|
||||
_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Sometimes send flush messages
|
||||
if counter%128 == 0 {
|
||||
err = client.FlushSync(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Send final flush message
|
||||
_, err = client.FlushAsync(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
<-done
|
||||
}
|
||||
|
||||
//-------------------------
|
||||
// test grpc
|
||||
|
||||
func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
return tmnet.Connect(addr)
|
||||
}
|
||||
|
||||
func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
|
||||
numDeliverTxs := 2000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
|
||||
// Start the listener
|
||||
server := abciserver.NewGRPCServer(socket, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
t.Fatalf("Error starting GRPC server: %v", err.Error())
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Connect to the socket
|
||||
conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
if err != nil {
|
||||
t.Fatalf("Error dialing GRPC server: %v", err.Error())
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := conn.Close(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
client := types.NewABCIApplicationClient(conn)
|
||||
|
||||
// Write requests
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
// Send request
|
||||
response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
|
||||
if err != nil {
|
||||
t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
|
||||
}
|
||||
counter++
|
||||
if response.Code != code.CodeTypeOK {
|
||||
t.Error("DeliverTx failed with ret_code", response.Code)
|
||||
}
|
||||
if counter > numDeliverTxs {
|
||||
t.Fatal("Too many DeliverTx responses")
|
||||
}
|
||||
t.Log("response", counter)
|
||||
if counter == numDeliverTxs {
|
||||
go func() {
|
||||
time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
|
||||
}()
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,24 @@
# KVStore

The KVStoreApplication is a simple merkle key-value store.
There are two apps here: the KVStoreApplication and the PersistentKVStoreApplication.

## KVStoreApplication

The KVStoreApplication is a simple merkle key-value store.
Transactions of the form `key=value` are stored as key-value pairs in the tree.
Transactions without an `=` sign set the value to the key.
The app has no replay protection (other than what the mempool provides).

## PersistentKVStoreApplication

The PersistentKVStoreApplication wraps the KVStoreApplication
and provides two additional features:

1) persistence of state across app restarts (using Tendermint's ABCI-Handshake mechanism)
2) validator set changes

The state is persisted in leveldb along with the last block committed,
and the Handshake allows any necessary blocks to be replayed.
Validator set changes are effected using the following transaction format:

```md
@@ -13,4 +27,4 @@ Validator set changes are effected using the following transaction format:

where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
To remove a validator from the validator set, set power to `0`.
There is no sybil protection against new validators joining.
There is no sybil protection against new validators joining.
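A hedged sketch of constructing such a validator-change transaction in Go, mirroring the `MakeValSetChangeTx` helper and the `parseValidatorTx` format (`val=` + base64 pubkey + `!` + power) that appear later in this diff; the key bytes below are placeholders, not a real validator key:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Placeholder 32-byte ed25519 public key; a real key would come from the validator.
	pubkey := make([]byte, 32)
	power := int64(10) // set to 0 to remove the validator

	tx := fmt.Sprintf("val=%s!%d", base64.StdEncoding.EncodeToString(pubkey), power)
	fmt.Println(tx) // e.g. val=AAAA...AA==!10
}
```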
@@ -1,10 +0,0 @@
package kvstore

// Return codes for the examples
const (
	CodeTypeOK              uint32 = 0
	CodeTypeEncodingError   uint32 = 1
	CodeTypeInvalidTxFormat uint32 = 2
	CodeTypeUnauthorized    uint32 = 3
	CodeTypeExecuted        uint32 = 5
)
@@ -1,22 +1,18 @@
|
||||
package kvstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strings"
|
||||
mrand "math/rand"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
cryptoencoding "github.com/tendermint/tendermint/crypto/encoding"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
)
|
||||
|
||||
// RandVal creates one random validator, with a key derived
|
||||
// from the input value
|
||||
func RandVal(i int) types.ValidatorUpdate {
|
||||
pubkey := tmrand.Bytes(32)
|
||||
power := tmrand.Uint16() + 1
|
||||
// Random value between [0, 2^16 - 1]
|
||||
power := mrand.Uint32() & (1<<16 - 1) // nolint:gosec // G404: Use of weak random number generator
|
||||
v := types.UpdateValidator(pubkey, int64(power), "")
|
||||
return v
|
||||
}
|
||||
@@ -36,44 +32,8 @@ func RandVals(cnt int) []types.ValidatorUpdate {
|
||||
// InitKVStore initializes the kvstore app with some data,
|
||||
// which allows tests to pass and is fine as long as you
|
||||
// don't make any tx that modifies the validator state
|
||||
func InitKVStore(ctx context.Context, app *Application) error {
|
||||
_, err := app.InitChain(ctx, &types.RequestInitChain{
|
||||
func InitKVStore(app *PersistentKVStoreApplication) {
|
||||
app.InitChain(types.RequestInitChain{
|
||||
Validators: RandVals(1),
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
// Create a new transaction
|
||||
func NewTx(key, value string) []byte {
|
||||
return []byte(strings.Join([]string{key, value}, "="))
|
||||
}
|
||||
|
||||
func NewRandomTx(size int) []byte {
|
||||
if size < 4 {
|
||||
panic("random tx size must be greater than 3")
|
||||
}
|
||||
return NewTx(tmrand.Str(2), tmrand.Str(size-3))
|
||||
}
|
||||
|
||||
func NewRandomTxs(n int) [][]byte {
|
||||
txs := make([][]byte, n)
|
||||
for i := 0; i < n; i++ {
|
||||
txs[i] = NewRandomTx(10)
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func NewTxFromID(i int) []byte {
|
||||
return []byte(fmt.Sprintf("%d=%d", i, i))
|
||||
}
|
||||
|
||||
// Create a transaction to add/remove/update a validator
|
||||
// To remove, set power to 0.
|
||||
func MakeValSetChangeTx(pubkey crypto.PublicKey, power int64) []byte {
|
||||
pk, err := cryptoencoding.PubKeyFromProto(pubkey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
|
||||
return []byte(fmt.Sprintf("%s%s!%d", ValidatorPrefix, pubStr, power))
|
||||
}
|
||||
|
||||
@@ -2,426 +2,29 @@ package kvstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/base64"
|
||||
"encoding/binary"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
cryptoencoding "github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/version"
|
||||
)
|
||||
|
||||
var (
|
||||
stateKey = []byte("stateKey")
|
||||
kvPairPrefixKey = []byte("kvPairKey:")
|
||||
|
||||
ProtocolVersion uint64 = 0x1
|
||||
)
|
||||
|
||||
const (
|
||||
ValidatorPrefix = "val="
|
||||
AppVersion uint64 = 1
|
||||
)
|
||||
|
||||
var _ types.Application = (*Application)(nil)
|
||||
|
||||
// Application is the kvstore state machine. It complies with the abci.Application interface.
|
||||
// It takes transactions in the form of key=value and saves them in a database. This is
|
||||
// a somewhat trivial example as there is no real state execution
|
||||
type Application struct {
|
||||
types.BaseApplication
|
||||
|
||||
state State
|
||||
RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
|
||||
stagedTxs [][]byte
|
||||
logger log.Logger
|
||||
|
||||
// validator set
|
||||
valUpdates []types.ValidatorUpdate
|
||||
valAddrToPubKeyMap map[string]cryptoproto.PublicKey
|
||||
}
|
||||
|
||||
// NewApplication creates an instance of the kvstore from the provided database
|
||||
func NewApplication(db dbm.DB) *Application {
|
||||
return &Application{
|
||||
logger: log.NewNopLogger(),
|
||||
state: loadState(db),
|
||||
valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
|
||||
}
|
||||
}
|
||||
|
||||
// NewPersistentApplication creates a new application using the goleveldb database engine
|
||||
func NewPersistentApplication(dbDir string) *Application {
|
||||
name := "kvstore"
|
||||
db, err := dbm.NewGoLevelDB(name, dbDir)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("failed to create persistent app at %s: %w", dbDir, err))
|
||||
}
|
||||
return NewApplication(db)
|
||||
}
|
||||
|
||||
// NewInMemoryApplication creates a new application from an in memory database.
|
||||
// Nothing will be persisted.
|
||||
func NewInMemoryApplication() *Application {
|
||||
return NewApplication(dbm.NewMemDB())
|
||||
}
|
||||
|
||||
// Info returns information about the state of the application. This is generally used every time a Tendermint instance
// begins and lets the application know which Tendermint version it's interacting with. Based on this information,
|
||||
// Tendermint will ensure it is in sync with the application by potentially replaying the blocks it has. If the
|
||||
// Application returns a 0 appBlockHeight, Tendermint will call InitChain to initialize the application with consensus related data
|
||||
func (app *Application) Info(_ context.Context, req *types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
// Tendermint expects the application to persist validators; on start-up we need to reload them into memory if they exist
|
||||
if len(app.valAddrToPubKeyMap) == 0 && app.state.Height > 0 {
|
||||
validators := app.getValidators()
|
||||
for _, v := range validators {
|
||||
pubkey, err := cryptoencoding.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
|
||||
}
|
||||
}
|
||||
|
||||
return &types.ResponseInfo{
|
||||
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
|
||||
Version: version.ABCIVersion,
|
||||
AppVersion: AppVersion,
|
||||
LastBlockHeight: app.state.Height,
|
||||
LastBlockAppHash: app.state.Hash(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// InitChain takes the genesis validators and stores them in the kvstore. It returns the application hash in the
|
||||
// case that the application starts prepopulated with values. This method is called whenever a new instance of the application
|
||||
// starts (i.e. app height = 0).
|
||||
func (app *Application) InitChain(_ context.Context, req *types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
for _, v := range req.Validators {
|
||||
app.updateValidator(v)
|
||||
}
|
||||
appHash := make([]byte, 8)
|
||||
binary.PutVarint(appHash, app.state.Size)
|
||||
return &types.ResponseInitChain{
|
||||
AppHash: appHash,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// CheckTx handles inbound transactions or in the case of recheckTx assesses old transaction validity after a state transition.
|
||||
// As this is called frequently, it's preferable to keep the check as stateless and as quick as possible.
// Here we check that the transaction has the correct key=value format.
|
||||
// For the KVStore we check that each transaction has the valid tx format:
|
||||
// - Contains one and only one `=`
|
||||
// - `=` is not the first or last byte.
|
||||
// - if the key is `val`, the validator update transaction must also be valid
|
||||
func (app *Application) CheckTx(_ context.Context, req *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
// If it is a validator update transaction, check that it is correctly formatted
|
||||
if isValidatorTx(req.Tx) {
|
||||
if _, _, err := parseValidatorTx(req.Tx); err != nil {
|
||||
return &types.ResponseCheckTx{Code: CodeTypeInvalidTxFormat}, nil
|
||||
}
|
||||
} else if !isValidTx(req.Tx) {
|
||||
return &types.ResponseCheckTx{Code: CodeTypeInvalidTxFormat}, nil
|
||||
}
|
||||
|
||||
return &types.ResponseCheckTx{Code: CodeTypeOK, GasWanted: 1}, nil
|
||||
}
|
||||
|
||||
// Tx must have a format like key:value or key=value. That is:
|
||||
// - it must have one and only one ":" or "="
|
||||
// - It must not begin or end with these special characters
|
||||
func isValidTx(tx []byte) bool {
|
||||
if bytes.Count(tx, []byte(":")) == 1 && bytes.Count(tx, []byte("=")) == 0 {
|
||||
if !bytes.HasPrefix(tx, []byte(":")) && !bytes.HasSuffix(tx, []byte(":")) {
|
||||
return true
|
||||
}
|
||||
} else if bytes.Count(tx, []byte("=")) == 1 && bytes.Count(tx, []byte(":")) == 0 {
|
||||
if !bytes.HasPrefix(tx, []byte("=")) && !bytes.HasSuffix(tx, []byte("=")) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
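To make the two accepted separators concrete, here is an illustrative helper (not part of the diff) exercising `isValidTx`; it assumes it lives in the same `kvstore` package, which already imports `fmt`:

```go
// sketchIsValidTx prints how a few sample transactions fare against the
// rules above: exactly one ':' or '=' separator, not at either end.
func sketchIsValidTx() {
	fmt.Println(isValidTx([]byte("a=b")))   // true: one '=', not at either end
	fmt.Println(isValidTx([]byte("a:b")))   // true: one ':', not at either end
	fmt.Println(isValidTx([]byte("=b")))    // false: separator at the start
	fmt.Println(isValidTx([]byte("a=b=c"))) // false: more than one '='
	fmt.Println(isValidTx([]byte("a:b=c"))) // false: mixes ':' and '='
}
```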
|
||||
|
||||
// PrepareProposal is called when the node is a proposer. Tendermint stages a set of transactions to the application. As the
|
||||
// KVStore has two accepted formats, `:` and `=`, we modify all instances of `:` with `=` to make it consistent. Note: this is
|
||||
// quite a trivial example of transaction modification.
|
||||
// NOTE: we assume that Tendermint will never provide more transactions than can fit in a block.
|
||||
func (app *Application) PrepareProposal(_ context.Context, req *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
|
||||
return &types.ResponsePrepareProposal{Txs: formatTxs(req.Txs)}, nil
|
||||
}
|
||||
|
||||
// formatTxs substitutes all the transactions with x:y to x=y
|
||||
func formatTxs(blockData [][]byte) [][]byte {
|
||||
txs := make([][]byte, len(blockData))
|
||||
for idx, tx := range blockData {
|
||||
txs[idx] = bytes.Replace(tx, []byte(":"), []byte("="), 1)
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
// ProcessProposal is called whenever a node receives a complete proposal. It allows the application to validate the proposal.
|
||||
// Only validators who can vote will have this method called. For the KVstore we reuse CheckTx.
|
||||
func (app *Application) ProcessProposal(ctx context.Context, req *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
|
||||
for _, tx := range req.Txs {
|
||||
// As CheckTx is a full validity check we can simply reuse this
|
||||
if resp, err := app.CheckTx(ctx, &types.RequestCheckTx{Tx: tx}); resp.Code != CodeTypeOK || err != nil {
|
||||
return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_REJECT}, nil
|
||||
}
|
||||
}
|
||||
return &types.ResponseProcessProposal{Status: types.ResponseProcessProposal_ACCEPT}, nil
|
||||
}
|
||||
|
||||
// FinalizeBlock executes the block against the application state. It punishes validators who equivocated and
|
||||
// updates validators according to transactions in a block. The rest of the transactions are regular key value
|
||||
// updates and are cached in memory and will be persisted once Commit is called.
|
||||
// ConsensusParams are never changed.
|
||||
func (app *Application) FinalizeBlock(_ context.Context, req *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
|
||||
// reset valset changes
|
||||
app.valUpdates = make([]types.ValidatorUpdate, 0)
|
||||
app.stagedTxs = make([][]byte, 0)
|
||||
|
||||
// Punish validators who committed equivocation.
|
||||
for _, ev := range req.Misbehavior {
|
||||
if ev.Type == types.MisbehaviorType_DUPLICATE_VOTE {
|
||||
addr := string(ev.Validator.Address)
|
||||
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
|
||||
app.valUpdates = append(app.valUpdates, types.ValidatorUpdate{
|
||||
PubKey: pubKey,
|
||||
Power: ev.Validator.Power - 1,
|
||||
})
|
||||
app.logger.Info("Decreased val power by 1 because of the equivocation",
|
||||
"val", addr)
|
||||
} else {
|
||||
panic(fmt.Errorf("wanted to punish val %q but can't find it", addr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
respTxs := make([]*types.ExecTxResult, len(req.Txs))
|
||||
for i, tx := range req.Txs {
|
||||
if isValidatorTx(tx) {
|
||||
pubKey, power, err := parseValidatorTx(tx)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.valUpdates = append(app.valUpdates, types.UpdateValidator(pubKey, power, ""))
|
||||
} else {
|
||||
app.stagedTxs = append(app.stagedTxs, tx)
|
||||
}
|
||||
respTxs[i] = &types.ExecTxResult{
|
||||
Code: CodeTypeOK,
|
||||
// With every transaction we can emit a series of events. To make it simple, we just emit the same events.
|
||||
Events: []types.Event{
|
||||
{
|
||||
Type: "app",
|
||||
Attributes: []types.EventAttribute{
|
||||
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
|
||||
{Key: "index_key", Value: "index is working", Index: true},
|
||||
{Key: "noindex_key", Value: "index is working", Index: false},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
app.state.Size++
|
||||
}
|
||||
|
||||
app.state.Height = req.Height
|
||||
|
||||
return &types.ResponseFinalizeBlock{TxResults: respTxs, ValidatorUpdates: app.valUpdates, AgreedAppData: app.state.Hash()}, nil
|
||||
}
|
||||
|
||||
// Commit is called after FinalizeBlock and after Tendermint state which includes the updates to
|
||||
// AppHash, ConsensusParams and ValidatorSet has occurred.
|
||||
// The KVStore persists the validator updates and the new key values
|
||||
func (app *Application) Commit(_ context.Context, _ *types.RequestCommit) (*types.ResponseCommit, error) {
|
||||
|
||||
// apply the validator updates to state (note this is really the validator set at h + 2)
|
||||
for _, valUpdate := range app.valUpdates {
|
||||
app.updateValidator(valUpdate)
|
||||
}
|
||||
|
||||
// persist all the staged txs in the kvstore
|
||||
for _, tx := range app.stagedTxs {
|
||||
parts := bytes.Split(tx, []byte("="))
|
||||
if len(parts) != 2 {
|
||||
panic(fmt.Sprintf("unexpected tx format. Expected 2 got %d: %s", len(parts), parts))
|
||||
}
|
||||
key, value := string(parts[0]), string(parts[1])
|
||||
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
}
|
||||
|
||||
// persist the state (i.e. size and height)
|
||||
saveState(app.state)
|
||||
|
||||
resp := &types.ResponseCommit{}
|
||||
if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks {
|
||||
resp.RetainHeight = app.state.Height - app.RetainBlocks + 1
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Returns an associated value or nil if missing.
|
||||
func (app *Application) Query(_ context.Context, reqQuery *types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
resQuery := &types.ResponseQuery{}
|
||||
|
||||
if reqQuery.Path == "/val" {
|
||||
key := []byte(ValidatorPrefix + string(reqQuery.Data))
|
||||
value, err := app.state.db.Get(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return &types.ResponseQuery{
|
||||
Key: reqQuery.Data,
|
||||
Value: value,
|
||||
}, nil
|
||||
}
|
||||
|
||||
if reqQuery.Prove {
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if value == nil {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
resQuery.Index = -1 // TODO make Proof return index
|
||||
resQuery.Key = reqQuery.Data
|
||||
resQuery.Value = value
|
||||
resQuery.Height = app.state.Height
|
||||
|
||||
return resQuery, nil
|
||||
}
|
||||
|
||||
resQuery.Key = reqQuery.Data
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if value == nil {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
resQuery.Value = value
|
||||
resQuery.Height = app.state.Height
|
||||
|
||||
return resQuery, nil
|
||||
}
|
||||
|
||||
func (app *Application) Close() error {
|
||||
return app.state.db.Close()
|
||||
}
|
||||
|
||||
func isValidatorTx(tx []byte) bool {
|
||||
return strings.HasPrefix(string(tx), ValidatorPrefix)
|
||||
}
|
||||
|
||||
func parseValidatorTx(tx []byte) ([]byte, int64, error) {
|
||||
tx = tx[len(ValidatorPrefix):]
|
||||
|
||||
// get the pubkey and power
|
||||
pubKeyAndPower := strings.Split(string(tx), "!")
|
||||
if len(pubKeyAndPower) != 2 {
|
||||
return nil, 0, fmt.Errorf("expected 'pubkey!power'. Got %v", pubKeyAndPower)
|
||||
}
|
||||
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
|
||||
|
||||
// decode the pubkey
|
||||
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("pubkey (%s) is invalid base64", pubkeyS)
|
||||
}
|
||||
|
||||
// decode the power
|
||||
power, err := strconv.ParseInt(powerS, 10, 64)
|
||||
if err != nil {
|
||||
return nil, 0, fmt.Errorf("power (%s) is not an int", powerS)
|
||||
}
|
||||
|
||||
if power < 0 {
|
||||
return nil, 0, fmt.Errorf("power can not be less than 0, got %d", power)
|
||||
}
|
||||
|
||||
return pubkey, power, nil
|
||||
}
|
||||
|
||||
// add, update, or remove a validator
|
||||
func (app *Application) updateValidator(v types.ValidatorUpdate) {
|
||||
pubkey, err := cryptoencoding.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
key := []byte(ValidatorPrefix + string(pubkey.Bytes()))
|
||||
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
hasKey, err := app.state.db.Has(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !hasKey {
|
||||
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
|
||||
app.logger.Info("tried to remove non existent validator. Skipping...", "pubKey", pubStr)
|
||||
}
|
||||
if err = app.state.db.Delete(key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
|
||||
} else {
|
||||
// add or update validator
|
||||
value := bytes.NewBuffer(make([]byte, 0))
|
||||
if err := types.WriteMessage(&v, value); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err = app.state.db.Set(key, value.Bytes()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
|
||||
}
|
||||
}
|
||||
|
||||
func (app *Application) getValidators() (validators []types.ValidatorUpdate) {
|
||||
itr, err := app.state.db.Iterator(nil, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
if isValidatorTx(itr.Key()) {
|
||||
validator := new(types.ValidatorUpdate)
|
||||
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
validators = append(validators, *validator)
|
||||
}
|
||||
}
|
||||
if err = itr.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -----------------------------
|
||||
|
||||
type State struct {
|
||||
db dbm.DB
|
||||
// Size is essentially the number of transactions that have been processed.
|
||||
// This is used for the appHash
|
||||
Size int64 `json:"size"`
|
||||
Height int64 `json:"height"`
|
||||
db dbm.DB
|
||||
Size int64 `json:"size"`
|
||||
Height int64 `json:"height"`
|
||||
AppHash []byte `json:"app_hash"`
|
||||
}
|
||||
|
||||
func loadState(db dbm.DB) State {
|
||||
@@ -452,17 +55,119 @@ func saveState(state State) {
|
||||
}
|
||||
}
|
||||
|
||||
// Hash returns the hash of the application state. This is computed
|
||||
// as the size or number of transactions processed within the state. Note that this isn't
|
||||
// a strong guarantee of state machine replication because states could
|
||||
// have different kv values but still have the same size.
|
||||
// This function is used as the "AgreedAppData"
|
||||
func (s State) Hash() []byte {
|
||||
appHash := make([]byte, 8)
|
||||
binary.PutVarint(appHash, s.Size)
|
||||
return appHash
|
||||
}
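As a tiny worked example of the encoding above (assuming only the standard library), a state that has processed three transactions yields an 8-byte app hash whose first byte is the zig-zag varint of 3:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Mirrors State.Hash: the "AgreedAppData" is just the varint-encoded
	// transaction count, left in an 8-byte, zero-padded buffer.
	appHash := make([]byte, 8)
	binary.PutVarint(appHash, 3) // e.g. Size == 3
	fmt.Printf("%x\n", appHash)  // prints 0600000000000000
}
```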
|
||||
|
||||
func prefixKey(key []byte) []byte {
|
||||
return append(kvPairPrefixKey, key...)
|
||||
}
|
||||
|
||||
//---------------------------------------------------
|
||||
|
||||
var _ types.Application = (*Application)(nil)
|
||||
|
||||
type Application struct {
|
||||
types.BaseApplication
|
||||
|
||||
state State
|
||||
RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
|
||||
}
|
||||
|
||||
func NewApplication() *Application {
|
||||
state := loadState(dbm.NewMemDB())
|
||||
return &Application{state: state}
|
||||
}
|
||||
|
||||
func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
|
||||
return types.ResponseInfo{
|
||||
Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
|
||||
Version: version.ABCIVersion,
|
||||
AppVersion: ProtocolVersion,
|
||||
LastBlockHeight: app.state.Height,
|
||||
LastBlockAppHash: app.state.AppHash,
|
||||
}
|
||||
}
|
||||
|
||||
// tx is either "key=value" or just arbitrary bytes
|
||||
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
var key, value string
|
||||
|
||||
parts := bytes.Split(req.Tx, []byte("="))
|
||||
if len(parts) == 2 {
|
||||
key, value = string(parts[0]), string(parts[1])
|
||||
} else {
|
||||
key, value = string(req.Tx), string(req.Tx)
|
||||
}
|
||||
|
||||
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.state.Size++
|
||||
|
||||
events := []types.Event{
|
||||
{
|
||||
Type: "app",
|
||||
Attributes: []types.EventAttribute{
|
||||
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
|
||||
{Key: "key", Value: key, Index: true},
|
||||
{Key: "index_key", Value: "index is working", Index: true},
|
||||
{Key: "noindex_key", Value: "index is working", Index: false},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
|
||||
}
|
||||
|
||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
|
||||
}
|
||||
|
||||
func (app *Application) Commit() types.ResponseCommit {
|
||||
// Using a memdb - just return the big endian size of the db
|
||||
appHash := make([]byte, 8)
|
||||
binary.PutVarint(appHash, app.state.Size)
|
||||
app.state.AppHash = appHash
|
||||
app.state.Height++
|
||||
saveState(app.state)
|
||||
|
||||
resp := types.ResponseCommit{Data: appHash}
|
||||
if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks {
|
||||
resp.RetainHeight = app.state.Height - app.RetainBlocks + 1
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
// Returns an associated value or nil if missing.
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
|
||||
if reqQuery.Prove {
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if value == nil {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
resQuery.Index = -1 // TODO make Proof return index
|
||||
resQuery.Key = reqQuery.Data
|
||||
resQuery.Value = value
|
||||
resQuery.Height = app.state.Height
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
resQuery.Key = reqQuery.Data
|
||||
value, err := app.state.db.Get(prefixKey(reqQuery.Data))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if value == nil {
|
||||
resQuery.Log = "does not exist"
|
||||
} else {
|
||||
resQuery.Log = "exists"
|
||||
}
|
||||
resQuery.Value = value
|
||||
resQuery.Height = app.state.Height
|
||||
|
||||
return resQuery
|
||||
}
|
||||
|
||||
@@ -3,6 +3,7 @@ package kvstore
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
@@ -12,8 +13,10 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
abciserver "github.com/tendermint/tendermint/abci/server"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -21,82 +24,81 @@ const (
|
||||
testValue = "def"
|
||||
)
|
||||
|
||||
func TestKVStoreKV(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
var ctx = context.Background()
|
||||
|
||||
kvstore := NewInMemoryApplication()
|
||||
tx := []byte(testKey + ":" + testValue)
|
||||
testKVStore(ctx, t, kvstore, tx, testKey, testValue)
|
||||
tx = []byte(testKey + "=" + testValue)
|
||||
testKVStore(ctx, t, kvstore, tx, testKey, testValue)
|
||||
}
|
||||
|
||||
func testKVStore(ctx context.Context, t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
checkTxResp, err := app.CheckTx(ctx, &types.RequestCheckTx{Tx: tx})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, uint32(0), checkTxResp.Code)
|
||||
|
||||
ppResp, err := app.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: [][]byte{tx}})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, ppResp.Txs, 1)
|
||||
req := &types.RequestFinalizeBlock{Height: 1, Txs: ppResp.Txs}
|
||||
ar, err := app.FinalizeBlock(ctx, req)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(ar.TxResults))
|
||||
require.False(t, ar.TxResults[0].IsErr())
|
||||
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := types.RequestDeliverTx{Tx: tx}
|
||||
ar := app.DeliverTx(req)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
// repeating tx doesn't raise error
|
||||
ar = app.DeliverTx(req)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
// commit
|
||||
_, err = app.Commit(ctx, &types.RequestCommit{})
|
||||
require.NoError(t, err)
|
||||
app.Commit()
|
||||
|
||||
info, err := app.Info(ctx, &types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
info := app.Info(types.RequestInfo{})
|
||||
require.NotZero(t, info.LastBlockHeight)
|
||||
|
||||
// make sure query is fine
|
||||
resQuery, err := app.Query(ctx, &types.RequestQuery{
|
||||
resQuery := app.Query(types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
|
||||
// make sure proof is fine
|
||||
resQuery, err = app.Query(ctx, &types.RequestQuery{
|
||||
resQuery = app.Query(types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
Prove: true,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.EqualValues(t, CodeTypeOK, resQuery.Code)
|
||||
require.EqualValues(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
kvstore := NewPersistentApplication(t.TempDir())
|
||||
func TestKVStoreKV(t *testing.T) {
|
||||
kvstore := NewApplication()
|
||||
key := testKey
|
||||
value := testValue
|
||||
testKVStore(ctx, t, kvstore, NewTx(key, value), key, value)
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
kvstore := NewPersistentKVStoreApplication(dir)
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testKVStore(t, kvstore, tx, key, value)
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
kvstore := NewPersistentApplication(t.TempDir())
|
||||
require.NoError(t, InitKVStore(ctx, kvstore))
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
kvstore := NewPersistentKVStoreApplication(dir)
|
||||
InitKVStore(kvstore)
|
||||
height := int64(0)
|
||||
|
||||
resInfo, err := kvstore.Info(ctx, &types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
resInfo := kvstore.Info(types.RequestInfo{})
|
||||
if resInfo.LastBlockHeight != height {
|
||||
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
|
||||
}
|
||||
@@ -104,37 +106,38 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
// make and apply block
|
||||
height = int64(1)
|
||||
hash := []byte("foo")
|
||||
if _, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Hash: hash, Height: height}); err != nil {
|
||||
t.Fatal(err)
|
||||
header := tmproto.Header{
|
||||
Height: height,
|
||||
}
|
||||
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
|
||||
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
|
||||
kvstore.Commit()
|
||||
|
||||
_, err = kvstore.Commit(ctx, &types.RequestCommit{})
|
||||
require.NoError(t, err)
|
||||
|
||||
resInfo, err = kvstore.Info(ctx, &types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, height, resInfo.LastBlockHeight)
|
||||
resInfo = kvstore.Info(types.RequestInfo{})
|
||||
if resInfo.LastBlockHeight != height {
|
||||
t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
kvstore := NewInMemoryApplication()
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
kvstore := NewPersistentKVStoreApplication(dir)
|
||||
|
||||
// init with some validators
|
||||
total := 10
|
||||
nInit := 5
|
||||
vals := RandVals(total)
|
||||
// initialize with the first nInit
|
||||
_, err := kvstore.InitChain(ctx, &types.RequestInitChain{
|
||||
kvstore.InitChain(types.RequestInitChain{
|
||||
Validators: vals[:nInit],
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
vals1, vals2 := vals[:nInit], kvstore.getValidators()
|
||||
vals1, vals2 := vals[:nInit], kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
var v1, v2, v3 types.ValidatorUpdate
|
||||
@@ -145,9 +148,9 @@ func TestValUpdates(t *testing.T) {
|
||||
tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)
|
||||
|
||||
makeApplyBlock(ctx, t, kvstore, 1, diff, tx1, tx2)
|
||||
makeApplyBlock(t, kvstore, 1, diff, tx1, tx2)
|
||||
|
||||
vals1, vals2 = vals[:nInit+2], kvstore.getValidators()
|
||||
vals1, vals2 = vals[:nInit+2], kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
// remove some validators
|
||||
@@ -160,10 +163,10 @@ func TestValUpdates(t *testing.T) {
|
||||
tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
|
||||
tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
|
||||
|
||||
makeApplyBlock(ctx, t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
|
||||
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
|
||||
vals2 = kvstore.getValidators()
|
||||
vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
// update some validators
|
||||
@@ -176,62 +179,15 @@ func TestValUpdates(t *testing.T) {
|
||||
diff = []types.ValidatorUpdate{v1}
|
||||
tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
|
||||
|
||||
makeApplyBlock(ctx, t, kvstore, 3, diff, tx1)
|
||||
makeApplyBlock(t, kvstore, 3, diff, tx1)
|
||||
|
||||
vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
|
||||
vals2 = kvstore.getValidators()
|
||||
vals2 = kvstore.Validators()
|
||||
valsEqual(t, vals1, vals2)
|
||||
|
||||
}
|
||||
|
||||
func TestCheckTx(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
kvstore := NewInMemoryApplication()
|
||||
|
||||
val := RandVal(1)
|
||||
|
||||
testCases := []struct {
|
||||
expCode uint32
|
||||
tx []byte
|
||||
}{
|
||||
{CodeTypeOK, NewTx("hello", "world")},
|
||||
{CodeTypeInvalidTxFormat, []byte("hello")},
|
||||
{CodeTypeOK, []byte("space:jam")},
|
||||
{CodeTypeInvalidTxFormat, []byte("=hello")},
|
||||
{CodeTypeInvalidTxFormat, []byte("hello=")},
|
||||
{CodeTypeOK, []byte("a=b")},
|
||||
{CodeTypeInvalidTxFormat, []byte("val=hello")},
|
||||
{CodeTypeInvalidTxFormat, []byte("val=hi!5")},
|
||||
{CodeTypeOK, MakeValSetChangeTx(val.PubKey, 10)},
|
||||
}
|
||||
|
||||
for idx, tc := range testCases {
|
||||
resp, err := kvstore.CheckTx(ctx, &types.RequestCheckTx{Tx: tc.tx})
|
||||
require.NoError(t, err, idx)
|
||||
fmt.Println(string(tc.tx))
|
||||
require.Equal(t, tc.expCode, resp.Code, idx)
|
||||
}
|
||||
}
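The table above pins down the kvstore's accepted tx shapes: "key=value" (or "key:value") with a non-empty key and value, with the reserved "val" key additionally required to carry a well-formed validator update. A stand-alone sketch of just the format check, using only the standard library (the helper name and codes are illustrative, not the application's real API; the "val" rule is omitted):

package main

import (
	"fmt"
	"strings"
)

// checkTxFormat mirrors the expectations in the test table above:
// a tx must be "key=value" or "key:value" with non-empty parts.
func checkTxFormat(tx []byte) uint32 {
	const (
		codeOK              = 0
		codeInvalidTxFormat = 2
	)
	s := string(tx)
	sep := "="
	if !strings.Contains(s, "=") {
		sep = ":"
	}
	parts := strings.SplitN(s, sep, 2)
	if len(parts) != 2 || parts[0] == "" || parts[1] == "" {
		return codeInvalidTxFormat
	}
	return codeOK
}

func main() {
	for _, tx := range []string{"hello=world", "space:jam", "=hello", "hello=", "hello"} {
		fmt.Printf("%-14q -> code %d\n", tx, checkTxFormat([]byte(tx)))
	}
}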
|
||||
|
||||
func TestClientServer(t *testing.T) {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
// set up socket app
|
||||
kvstore := NewInMemoryApplication()
|
||||
client, _, err := makeClientServer(t, kvstore, "kvstore-socket", "socket")
|
||||
require.NoError(t, err)
|
||||
runClientTests(ctx, t, client)
|
||||
|
||||
// set up grpc app
|
||||
kvstore = NewInMemoryApplication()
|
||||
gclient, _, err := makeClientServer(t, kvstore, t.TempDir(), "grpc")
|
||||
require.NoError(t, err)
|
||||
runClientTests(ctx, t, gclient)
|
||||
}
|
||||
|
||||
func makeApplyBlock(
|
||||
ctx context.Context,
|
||||
t *testing.T,
|
||||
kvstore types.Application,
|
||||
heightInt int,
|
||||
@@ -240,23 +196,25 @@ func makeApplyBlock(
|
||||
// make and apply block
|
||||
height := int64(heightInt)
|
||||
hash := []byte("foo")
|
||||
resFinalizeBlock, err := kvstore.FinalizeBlock(ctx, &types.RequestFinalizeBlock{
|
||||
Hash: hash,
|
||||
header := tmproto.Header{
|
||||
Height: height,
|
||||
Txs: txs,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
_, err = kvstore.Commit(ctx, &types.RequestCommit{})
|
||||
require.NoError(t, err)
|
||||
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
|
||||
for _, tx := range txs {
|
||||
if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
|
||||
t.Fatal(r)
|
||||
}
|
||||
}
|
||||
resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
|
||||
kvstore.Commit()
|
||||
|
||||
valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)
|
||||
valsEqual(t, diff, resEndBlock.ValidatorUpdates)
|
||||
|
||||
}
|
||||
|
||||
// order doesn't matter
|
||||
func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
|
||||
t.Helper()
|
||||
if len(vals1) != len(vals2) {
|
||||
t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
|
||||
}
|
||||
@@ -271,50 +229,138 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
|
||||
}
|
||||
}
|
||||
|
||||
func makeClientServer(t *testing.T, app types.Application, name, transport string) (abcicli.Client, service.Service, error) {
|
||||
func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
|
||||
// Start the listener
|
||||
addr := fmt.Sprintf("unix://%s.sock", name)
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
server, err := abciserver.NewServer(addr, transport, app)
|
||||
require.NoError(t, err)
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
server.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Connect to the socket
|
||||
client := abcicli.NewSocketClient(socket, false)
|
||||
client.SetLogger(logger.With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
if err = server.Stop(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return client, server, nil
|
||||
}
|
||||
|
||||
func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
|
||||
// Start the listener
|
||||
socket := fmt.Sprintf("unix://%s.sock", name)
|
||||
logger := log.TestingLogger()
|
||||
|
||||
gapp := types.NewGRPCApplication(app)
|
||||
server := abciserver.NewGRPCServer(socket, gapp)
|
||||
server.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
client := abcicli.NewGRPCClient(socket, true)
|
||||
client.SetLogger(logger.With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
if err := server.Stop(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
return nil, nil, err
|
||||
}
|
||||
return client, server, nil
|
||||
}
|
||||
|
||||
func TestClientServer(t *testing.T) {
|
||||
// set up socket app
|
||||
kvstore := NewApplication()
|
||||
client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
// Connect to the client
|
||||
client, err := abcicli.NewClient(addr, transport, false)
|
||||
require.NoError(t, err)
|
||||
client.SetLogger(logger.With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
return client, server, nil
|
||||
runClientTests(t, client)
|
||||
|
||||
// set up grpc app
|
||||
kvstore = NewApplication()
|
||||
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
if err := gserver.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
t.Cleanup(func() {
|
||||
if err := gclient.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
runClientTests(t, gclient)
|
||||
}
|
||||
|
||||
func runClientTests(ctx context.Context, t *testing.T, client abcicli.Client) {
|
||||
func runClientTests(t *testing.T, client abcicli.Client) {
|
||||
// run some tests....
|
||||
tx := []byte(testKey + ":" + testValue)
|
||||
testKVStore(ctx, t, client, tx, testKey, testValue)
|
||||
tx = []byte(testKey + "=" + testValue)
|
||||
testKVStore(ctx, t, client, tx, testKey, testValue)
|
||||
key := testKey
|
||||
value := key
|
||||
tx := []byte(key)
|
||||
testClient(t, client, tx, key, value)
|
||||
|
||||
value = testValue
|
||||
tx = []byte(key + "=" + value)
|
||||
testClient(t, client, tx, key, value)
|
||||
}
|
||||
|
||||
func TestTxGeneration(t *testing.T) {
|
||||
require.Len(t, NewRandomTx(20), 20)
|
||||
require.Len(t, NewRandomTxs(10), 10)
|
||||
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
|
||||
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||
require.NoError(t, err)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
// repeating tx doesn't raise error
|
||||
ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||
require.NoError(t, err)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
// commit
|
||||
_, err = app.CommitSync(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := app.InfoSync(ctx, types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
require.NotZero(t, info.LastBlockHeight)
|
||||
|
||||
// make sure query is fine
|
||||
resQuery, err := app.QuerySync(ctx, types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
})
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
|
||||
// make sure proof is fine
|
||||
resQuery, err = app.QuerySync(ctx, types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
Prove: true,
|
||||
})
|
||||
require.Nil(t, err)
|
||||
require.Equal(t, code.CodeTypeOK, resQuery.Code)
|
||||
require.Equal(t, key, string(resQuery.Key))
|
||||
require.Equal(t, value, string(resQuery.Value))
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
}
|
||||
|
||||
abci/example/kvstore/persistent_kvstore.go (new file, 286 lines)
@@ -0,0 +1,286 @@
|
||||
package kvstore
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
|
||||
)
|
||||
|
||||
const (
|
||||
ValidatorSetChangePrefix string = "val:"
|
||||
)
|
||||
|
||||
//-----------------------------------------
|
||||
|
||||
var _ types.Application = (*PersistentKVStoreApplication)(nil)
|
||||
|
||||
type PersistentKVStoreApplication struct {
|
||||
app *Application
|
||||
|
||||
// validator set
|
||||
ValUpdates []types.ValidatorUpdate
|
||||
|
||||
valAddrToPubKeyMap map[string]pc.PublicKey
|
||||
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
|
||||
name := "kvstore"
|
||||
db, err := dbm.NewGoLevelDB(name, dbDir)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
state := loadState(db)
|
||||
|
||||
return &PersistentKVStoreApplication{
|
||||
app: &Application{state: state},
|
||||
valAddrToPubKeyMap: make(map[string]pc.PublicKey),
|
||||
logger: log.NewNopLogger(),
|
||||
}
|
||||
}
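For orientation, the persistent variant wraps the in-memory application state with a goleveldb database under dbDir, so the same app can be stopped and restarted without losing its height or app hash. A minimal usage sketch, assuming only the constructor and Close method shown in this file:

package main

import (
	"log"
	"os"

	"github.com/tendermint/tendermint/abci/example/kvstore"
)

func main() {
	// State lives on disk under dir, so it survives restarts.
	dir, err := os.MkdirTemp("", "kvstore")
	if err != nil {
		log.Fatal(err)
	}
	defer os.RemoveAll(dir)

	app := kvstore.NewPersistentKVStoreApplication(dir)
	defer func() { _ = app.Close() }()
	// The app can now be handed to an ABCI server or used in-process.
}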
|
||||
|
||||
func (app *PersistentKVStoreApplication) Close() error {
|
||||
return app.app.state.db.Close()
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
|
||||
app.logger = l
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
res := app.app.Info(req)
|
||||
res.LastBlockHeight = app.app.state.Height
|
||||
res.LastBlockAppHash = app.app.state.AppHash
|
||||
return res
|
||||
}
|
||||
|
||||
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
// if it starts with "val:", update the validator set
|
||||
// format is "val:pubkey!power"
|
||||
if isValidatorTx(req.Tx) {
|
||||
// update validators in the merkle tree
|
||||
// and in app.ValUpdates
|
||||
return app.execValidatorTx(req.Tx)
|
||||
}
|
||||
|
||||
// otherwise, update the key-value store
|
||||
return app.app.DeliverTx(req)
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
return app.app.CheckTx(req)
|
||||
}
|
||||
|
||||
// Commit will panic if InitChain was not called
|
||||
func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
|
||||
return app.app.Commit()
|
||||
}
|
||||
|
||||
// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
|
||||
// For any other path, returns an associated value or nil if missing.
|
||||
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
|
||||
switch reqQuery.Path {
|
||||
case "/val":
|
||||
key := []byte("val:" + string(reqQuery.Data))
|
||||
value, err := app.app.state.db.Get(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
resQuery.Key = reqQuery.Data
|
||||
resQuery.Value = value
|
||||
return
|
||||
default:
|
||||
return app.app.Query(reqQuery)
|
||||
}
|
||||
}
|
||||
|
||||
// Save the validators in the merkle tree
|
||||
func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
|
||||
for _, v := range req.Validators {
|
||||
r := app.updateValidator(v)
|
||||
if r.IsErr() {
|
||||
app.logger.Error("Error updating validators", "r", r)
|
||||
}
|
||||
}
|
||||
return types.ResponseInitChain{}
|
||||
}
|
||||
|
||||
// Track the block hash and header information
|
||||
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
|
||||
// reset valset changes
|
||||
app.ValUpdates = make([]types.ValidatorUpdate, 0)
|
||||
|
||||
// Punish validators who committed equivocation.
|
||||
for _, ev := range req.ByzantineValidators {
|
||||
if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
|
||||
addr := string(ev.Validator.Address)
|
||||
if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
|
||||
app.updateValidator(types.ValidatorUpdate{
|
||||
PubKey: pubKey,
|
||||
Power: ev.Validator.Power - 1,
|
||||
})
|
||||
app.logger.Info("Decreased val power by 1 because of the equivocation",
|
||||
"val", addr)
|
||||
} else {
|
||||
app.logger.Error("Wanted to punish val, but can't find it",
|
||||
"val", addr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return types.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
// Update the validator set
|
||||
func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
|
||||
return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ListSnapshots(
|
||||
req types.RequestListSnapshots) types.ResponseListSnapshots {
|
||||
return types.ResponseListSnapshots{}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
|
||||
req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
|
||||
return types.ResponseLoadSnapshotChunk{}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) OfferSnapshot(
|
||||
req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
|
||||
return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
|
||||
req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
|
||||
return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
|
||||
}
|
||||
|
||||
//---------------------------------------------
|
||||
// update validators
|
||||
|
||||
func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
|
||||
itr, err := app.app.state.db.Iterator(nil, nil)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
for ; itr.Valid(); itr.Next() {
|
||||
if isValidatorTx(itr.Key()) {
|
||||
validator := new(types.ValidatorUpdate)
|
||||
err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
validators = append(validators, *validator)
|
||||
}
|
||||
}
|
||||
if err = itr.Error(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte {
|
||||
pk, err := cryptoenc.PubKeyFromProto(pubkey)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
pubStr := base64.StdEncoding.EncodeToString(pk.Bytes())
|
||||
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
|
||||
}
|
||||
|
||||
func isValidatorTx(tx []byte) bool {
|
||||
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
|
||||
}
|
||||
|
||||
// format is "val:pubkey!power"
|
||||
// pubkey is a base64-encoded 32-byte ed25519 key
|
||||
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
|
||||
tx = tx[len(ValidatorSetChangePrefix):]
|
||||
|
||||
// get the pubkey and power
|
||||
pubKeyAndPower := strings.Split(string(tx), "!")
|
||||
if len(pubKeyAndPower) != 2 {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
|
||||
}
|
||||
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
|
||||
|
||||
// decode the pubkey
|
||||
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
|
||||
if err != nil {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
|
||||
}
|
||||
|
||||
// decode the power
|
||||
power, err := strconv.ParseInt(powerS, 10, 64)
|
||||
if err != nil {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
|
||||
}
|
||||
|
||||
// update
|
||||
return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
|
||||
}
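Putting MakeValSetChangeTx and execValidatorTx together, the wire format is simply "val:<base64 pubkey>!<power>". A stdlib-only round trip of that format (the 32 zero bytes stand in for a real ed25519 public key; this is an illustration, not the application's own helper):

package main

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// Build the tx the same way MakeValSetChangeTx does.
	pub := make([]byte, 32)
	tx := fmt.Sprintf("val:%s!%d", base64.StdEncoding.EncodeToString(pub), 10)

	// Parse it back the same way execValidatorTx does.
	body := strings.TrimPrefix(tx, "val:")
	parts := strings.Split(body, "!")
	key, err := base64.StdEncoding.DecodeString(parts[0])
	if err != nil {
		panic(err)
	}
	power, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("decoded %d pubkey bytes, power %d\n", len(key), power)
}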
|
||||
|
||||
// add, update, or remove a validator
|
||||
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
|
||||
pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
key := []byte("val:" + string(pubkey.Bytes()))
|
||||
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
hasKey, err := app.app.state.db.Has(key)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if !hasKey {
|
||||
pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeUnauthorized,
|
||||
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
|
||||
}
|
||||
if err = app.app.state.db.Delete(key); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
|
||||
} else {
|
||||
// add or update validator
|
||||
value := bytes.NewBuffer(make([]byte, 0))
|
||||
if err := types.WriteMessage(&v, value); err != nil {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Error encoding validator: %v", err)}
|
||||
}
|
||||
if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
|
||||
}
|
||||
|
||||
// we only update the changes array if we successfully updated the tree
|
||||
app.ValUpdates = append(app.ValUpdates, v)
|
||||
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
@@ -1,7 +1,6 @@
|
||||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
|
||||
"google.golang.org/grpc"
|
||||
@@ -19,11 +18,11 @@ type GRPCServer struct {
|
||||
listener net.Listener
|
||||
server *grpc.Server
|
||||
|
||||
app types.Application
|
||||
app types.ABCIApplicationServer
|
||||
}
|
||||
|
||||
// NewGRPCServer returns a new gRPC ABCI server
|
||||
func NewGRPCServer(protoAddr string, app types.Application) service.Service {
|
||||
func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &GRPCServer{
|
||||
proto: proto,
|
||||
@@ -45,7 +44,7 @@ func (s *GRPCServer) OnStart() error {
|
||||
|
||||
s.listener = ln
|
||||
s.server = grpc.NewServer()
|
||||
types.RegisterABCIServer(s.server, &gRPCApplication{s.app})
|
||||
types.RegisterABCIApplicationServer(s.server, s.app)
|
||||
|
||||
s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr)
|
||||
go func() {
|
||||
@@ -60,18 +59,3 @@ func (s *GRPCServer) OnStart() error {
|
||||
func (s *GRPCServer) OnStop() {
|
||||
s.server.Stop()
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
// gRPCApplication is a gRPC shim for Application
|
||||
type gRPCApplication struct {
|
||||
types.Application
|
||||
}
|
||||
|
||||
func (app *gRPCApplication) Echo(_ context.Context, req *types.RequestEcho) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{Message: req.Message}, nil
|
||||
}
|
||||
|
||||
func (app *gRPCApplication) Flush(_ context.Context, req *types.RequestFlush) (*types.ResponseFlush, error) {
|
||||
return &types.ResponseFlush{}, nil
|
||||
}
|
||||
|
||||
@@ -2,8 +2,9 @@
|
||||
Package server is used to start a new ABCI server.
|
||||
|
||||
It contains two server implementations:
|
||||
- gRPC server
|
||||
- socket server
|
||||
* gRPC server
|
||||
* socket server
|
||||
|
||||
*/
|
||||
package server
|
||||
|
||||
@@ -14,8 +15,6 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
// NewServer is a utility function for out of process applications to set up either a socket or
|
||||
// grpc server that can listen to requests from the equivalent Tendermint client
|
||||
func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) {
|
||||
var s service.Service
|
||||
var err error
|
||||
@@ -23,7 +22,7 @@ func NewServer(protoAddr, transport string, app types.Application) (service.Serv
|
||||
case "socket":
|
||||
s = NewSocketServer(protoAddr, app)
|
||||
case "grpc":
|
||||
s = NewGRPCServer(protoAddr, app)
|
||||
s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
|
||||
default:
|
||||
err = fmt.Errorf("unknown server type %s", transport)
|
||||
}
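As a usage sketch of the helper above: an out-of-process Go application picks a transport ("socket" or "grpc"), constructs the service, and starts it. Constructor names follow the code and tests in this diff and may differ between Tendermint versions:

package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	abciserver "github.com/tendermint/tendermint/abci/server"
)

func main() {
	app := kvstore.NewApplication() // example app from this diff

	srv, err := abciserver.NewServer("unix://kvstore.sock", "socket", app)
	if err != nil {
		log.Fatal(err)
	}
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}

	// Run until interrupted, then stop the server cleanly.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig
	if err := srv.Stop(); err != nil {
		log.Fatal(err)
	}
}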
|
||||
|
||||
@@ -2,8 +2,6 @@ package server
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
@@ -11,17 +9,14 @@ import (
|
||||
"runtime"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
tmlog "github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
// SocketServer is the server-side implementation of the TSP (Tendermint Socket Protocol)
|
||||
// for out-of-process go applications. Note, in the case of an application written in golang,
|
||||
// the developer may also run both Tendermint and the application within the same process.
|
||||
//
|
||||
// The socket server delivers requests from the connection to the application and writes the responses back.
|
||||
// var maxNumberConnections = 2
|
||||
|
||||
type SocketServer struct {
|
||||
service.BaseService
|
||||
isLoggerSet bool
|
||||
@@ -38,9 +33,6 @@ type SocketServer struct {
|
||||
app types.Application
|
||||
}
|
||||
|
||||
const responseBufferSize = 1000
|
||||
|
||||
// NewSocketServer creates a server from a golang-based out-of-process application.
|
||||
func NewSocketServer(protoAddr string, app types.Application) service.Service {
|
||||
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
|
||||
s := &SocketServer{
|
||||
@@ -128,8 +120,8 @@ func (s *SocketServer) acceptConnectionsRoutine() {
|
||||
|
||||
connID := s.addConn(conn)
|
||||
|
||||
closeConn := make(chan error, 2) // Push to signal connection closed
|
||||
responses := make(chan *types.Response, responseBufferSize) // A channel to buffer responses
|
||||
closeConn := make(chan error, 2) // Push to signal connection closed
|
||||
responses := make(chan *types.Response, 1000) // A channel to buffer responses
|
||||
|
||||
// Read requests from conn and deal with them
|
||||
go s.handleRequests(closeConn, conn, responses)
|
||||
@@ -165,9 +157,7 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
|
||||
var bufReader = bufio.NewReader(conn)
|
||||
|
||||
defer func() {
|
||||
// make sure to recover from any app-related panics to allow proper socket cleanup.
|
||||
// In the case of a panic, we do not notify the client by passing an exception so
|
||||
// presume that the client is still running and retying to connect
|
||||
// make sure to recover from any app-related panics to allow proper socket cleanup
|
||||
r := recover()
|
||||
if r != nil {
|
||||
const size = 64 << 10
|
||||
@@ -196,112 +186,55 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
|
||||
}
|
||||
s.appMtx.Lock()
|
||||
count++
|
||||
resp, err := s.handleRequest(context.TODO(), req)
|
||||
if err != nil {
|
||||
// any error either from the application or because of an unknown request
|
||||
// throws an exception back to the client. This will stop the server and
|
||||
// should also halt the client.
|
||||
responses <- types.ToResponseException(err.Error())
|
||||
} else {
|
||||
responses <- resp
|
||||
}
|
||||
s.handleRequest(req, responses)
|
||||
s.appMtx.Unlock()
|
||||
}
|
||||
}
|
||||
|
||||
// handleRequests takes a request and calls the application passing the returned
|
||||
func (s *SocketServer) handleRequest(ctx context.Context, req *types.Request) (*types.Response, error) {
|
||||
func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
|
||||
switch r := req.Value.(type) {
|
||||
case *types.Request_Echo:
|
||||
return types.ToResponseEcho(r.Echo.Message), nil
|
||||
responses <- types.ToResponseEcho(r.Echo.Message)
|
||||
case *types.Request_Flush:
|
||||
return types.ToResponseFlush(), nil
|
||||
responses <- types.ToResponseFlush()
|
||||
case *types.Request_Info:
|
||||
res, err := s.app.Info(ctx, r.Info)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseInfo(res), nil
|
||||
res := s.app.Info(*r.Info)
|
||||
responses <- types.ToResponseInfo(res)
|
||||
case *types.Request_DeliverTx:
|
||||
res := s.app.DeliverTx(*r.DeliverTx)
|
||||
responses <- types.ToResponseDeliverTx(res)
|
||||
case *types.Request_CheckTx:
|
||||
res, err := s.app.CheckTx(ctx, r.CheckTx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseCheckTx(res), nil
|
||||
res := s.app.CheckTx(*r.CheckTx)
|
||||
responses <- types.ToResponseCheckTx(res)
|
||||
case *types.Request_Commit:
|
||||
res, err := s.app.Commit(ctx, r.Commit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseCommit(res), nil
|
||||
res := s.app.Commit()
|
||||
responses <- types.ToResponseCommit(res)
|
||||
case *types.Request_Query:
|
||||
res, err := s.app.Query(ctx, r.Query)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseQuery(res), nil
|
||||
res := s.app.Query(*r.Query)
|
||||
responses <- types.ToResponseQuery(res)
|
||||
case *types.Request_InitChain:
|
||||
res, err := s.app.InitChain(ctx, r.InitChain)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseInitChain(res), nil
|
||||
case *types.Request_FinalizeBlock:
|
||||
res, err := s.app.FinalizeBlock(ctx, r.FinalizeBlock)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseFinalizeBlock(res), nil
|
||||
res := s.app.InitChain(*r.InitChain)
|
||||
responses <- types.ToResponseInitChain(res)
|
||||
case *types.Request_BeginBlock:
|
||||
res := s.app.BeginBlock(*r.BeginBlock)
|
||||
responses <- types.ToResponseBeginBlock(res)
|
||||
case *types.Request_EndBlock:
|
||||
res := s.app.EndBlock(*r.EndBlock)
|
||||
responses <- types.ToResponseEndBlock(res)
|
||||
case *types.Request_ListSnapshots:
|
||||
res, err := s.app.ListSnapshots(ctx, r.ListSnapshots)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseListSnapshots(res), nil
|
||||
res := s.app.ListSnapshots(*r.ListSnapshots)
|
||||
responses <- types.ToResponseListSnapshots(res)
|
||||
case *types.Request_OfferSnapshot:
|
||||
res, err := s.app.OfferSnapshot(ctx, r.OfferSnapshot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseOfferSnapshot(res), nil
|
||||
case *types.Request_PrepareProposal:
|
||||
res, err := s.app.PrepareProposal(ctx, r.PrepareProposal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponsePrepareProposal(res), nil
|
||||
case *types.Request_ProcessProposal:
|
||||
res, err := s.app.ProcessProposal(ctx, r.ProcessProposal)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseProcessProposal(res), nil
|
||||
res := s.app.OfferSnapshot(*r.OfferSnapshot)
|
||||
responses <- types.ToResponseOfferSnapshot(res)
|
||||
case *types.Request_LoadSnapshotChunk:
|
||||
res, err := s.app.LoadSnapshotChunk(ctx, r.LoadSnapshotChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseLoadSnapshotChunk(res), nil
|
||||
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
|
||||
responses <- types.ToResponseLoadSnapshotChunk(res)
|
||||
case *types.Request_ApplySnapshotChunk:
|
||||
res, err := s.app.ApplySnapshotChunk(ctx, r.ApplySnapshotChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseApplySnapshotChunk(res), nil
|
||||
case *types.Request_ExtendVote:
|
||||
res, err := s.app.ExtendVote(ctx, r.ExtendVote)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseExtendVote(res), nil
|
||||
case *types.Request_VerifyVoteExtension:
|
||||
res, err := s.app.VerifyVoteExtension(ctx, r.VerifyVoteExtension)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return types.ToResponseVerifyVoteExtension(res), nil
|
||||
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
|
||||
responses <- types.ToResponseApplySnapshotChunk(res)
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown request from client: %T", req)
|
||||
responses <- types.ToResponseException("Unknown request")
|
||||
}
|
||||
}
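The hunk above captures the design change in miniature: the older server pushed each response onto a buffered channel drained by a writer goroutine, while the newer handleRequest returns (response, error) and lets the caller decide whether to enqueue the response or an exception. A toy, dependency-free sketch of the channel-based shape (types and names here are invented for illustration):

package main

import "fmt"

type request struct{ kind, payload string }
type response struct{ kind, payload string }

// handle dispatches one request, mirroring the switch above in miniature.
func handle(req request) (response, error) {
	switch req.kind {
	case "echo":
		return response{"echo", req.payload}, nil
	case "flush":
		return response{"flush", ""}, nil
	default:
		return response{}, fmt.Errorf("unknown request from client: %q", req.kind)
	}
}

func main() {
	responses := make(chan response, 8) // a writer goroutine would drain this
	for _, req := range []request{{"echo", "hello"}, {"flush", ""}, {"bogus", ""}} {
		resp, err := handle(req)
		if err != nil {
			responses <- response{"exception", err.Error()}
			continue
		}
		responses <- resp
	}
	close(responses)
	for r := range responses {
		fmt.Printf("%s: %s\n", r.kind, r.payload)
	}
}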
|
||||
|
||||
@@ -323,13 +256,6 @@ func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, res
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// If the application has responded with an exception, the server returns the error
|
||||
// back to the client and closes the connection. The receiving Tendermint client should
|
||||
// log the error and gracefully terminate
|
||||
if e, ok := res.Value.(*types.Response_Exception); ok {
|
||||
closeConn <- errors.New(e.Exception.Error)
|
||||
}
|
||||
count++
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,29 +11,17 @@ import (
|
||||
)
|
||||
|
||||
func TestClientServerNoAddrPrefix(t *testing.T) {
|
||||
t.Helper()
|
||||
|
||||
addr := "localhost:26658"
|
||||
transport := "socket"
|
||||
app := kvstore.NewInMemoryApplication()
|
||||
app := kvstore.NewApplication()
|
||||
|
||||
server, err := abciserver.NewServer(addr, transport, app)
|
||||
assert.NoError(t, err, "expected no error on NewServer")
|
||||
err = server.Start()
|
||||
assert.NoError(t, err, "expected no error on server.Start")
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
|
||||
client, err := abciclient.NewClient(addr, transport, true)
|
||||
assert.NoError(t, err, "expected no error on NewClient")
|
||||
err = client.Start()
|
||||
assert.NoError(t, err, "expected no error on client.Start")
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
@@ -5,21 +5,25 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
mrand "math/rand"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
|
||||
func InitChain(ctx context.Context, client abcicli.Client) error {
|
||||
var ctx = context.Background()
|
||||
|
||||
func InitChain(client abcicli.Client) error {
|
||||
total := 10
|
||||
vals := make([]types.ValidatorUpdate, total)
|
||||
for i := 0; i < total; i++ {
|
||||
pubkey := tmrand.Bytes(33)
|
||||
power := tmrand.Int()
|
||||
// nolint:gosec // G404: Use of weak random number generator
|
||||
power := mrand.Int()
|
||||
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
|
||||
}
|
||||
_, err := client.InitChain(ctx, &types.RequestInitChain{
|
||||
_, err := client.InitChainSync(ctx, types.RequestInitChain{
|
||||
Validators: vals,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -30,72 +34,44 @@ func InitChain(ctx context.Context, client abcicli.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func Commit(ctx context.Context, client abcicli.Client) error {
|
||||
_, err := client.Commit(ctx, &types.RequestCommit{})
|
||||
func Commit(client abcicli.Client, hashExp []byte) error {
|
||||
res, err := client.CommitSync(ctx)
|
||||
data := res.Data
|
||||
if err != nil {
|
||||
fmt.Println("Failed test: Commit")
|
||||
fmt.Printf("error while committing: %v\n", err)
|
||||
return err
|
||||
}
|
||||
if !bytes.Equal(data, hashExp) {
|
||||
fmt.Println("Failed test: Commit")
|
||||
fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp)
|
||||
return errors.New("commitTx failed")
|
||||
}
|
||||
fmt.Println("Passed test: Commit")
|
||||
return nil
|
||||
}
|
||||
|
||||
func FinalizeBlock(ctx context.Context, client abcicli.Client, txBytes [][]byte, codeExp []uint32, dataExp []byte, hashExp []byte) error {
|
||||
res, _ := client.FinalizeBlock(ctx, &types.RequestFinalizeBlock{Txs: txBytes})
|
||||
appHash := res.AgreedAppData
|
||||
for i, tx := range res.TxResults {
|
||||
code, data, log := tx.Code, tx.Data, tx.Log
|
||||
if code != codeExp[i] {
|
||||
fmt.Println("Failed test: FinalizeBlock")
|
||||
fmt.Printf("FinalizeBlock response code was unexpected. Got %v expected %v. Log: %v\n",
|
||||
code, codeExp, log)
|
||||
return errors.New("FinalizeBlock error")
|
||||
}
|
||||
if !bytes.Equal(data, dataExp) {
|
||||
fmt.Println("Failed test: FinalizeBlock")
|
||||
fmt.Printf("FinalizeBlock response data was unexpected. Got %X expected %X\n",
|
||||
data, dataExp)
|
||||
return errors.New("FinalizeBlock error")
|
||||
}
|
||||
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: DeliverTx")
|
||||
fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
|
||||
code, codeExp, log)
|
||||
return errors.New("deliverTx error")
|
||||
}
|
||||
if !bytes.Equal(appHash, hashExp) {
|
||||
fmt.Println("Failed test: FinalizeBlock")
|
||||
fmt.Printf("Application hash was unexpected. Got %X expected %X\n", appHash, hashExp)
|
||||
return errors.New("FinalizeBlock error")
|
||||
if !bytes.Equal(data, dataExp) {
|
||||
fmt.Println("Failed test: DeliverTx")
|
||||
fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
|
||||
data, dataExp)
|
||||
return errors.New("deliverTx error")
|
||||
}
|
||||
fmt.Println("Passed test: FinalizeBlock")
|
||||
fmt.Println("Passed test: DeliverTx")
|
||||
return nil
|
||||
}
|
||||
|
||||
func PrepareProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, txExpected [][]byte, dataExp []byte) error {
|
||||
res, _ := client.PrepareProposal(ctx, &types.RequestPrepareProposal{Txs: txBytes})
|
||||
for i, tx := range res.Txs {
|
||||
if !bytes.Equal(tx, txExpected[i]) {
|
||||
fmt.Println("Failed test: PrepareProposal")
|
||||
fmt.Printf("PrepareProposal transaction was unexpected. Got %x expected %x.",
|
||||
tx, txExpected[i])
|
||||
return errors.New("PrepareProposal error")
|
||||
}
|
||||
}
|
||||
fmt.Println("Passed test: PrepareProposal")
|
||||
return nil
|
||||
}
|
||||
|
||||
func ProcessProposal(ctx context.Context, client abcicli.Client, txBytes [][]byte, statusExp types.ResponseProcessProposal_ProposalStatus) error {
|
||||
res, _ := client.ProcessProposal(ctx, &types.RequestProcessProposal{Txs: txBytes})
|
||||
if res.Status != statusExp {
|
||||
fmt.Println("Failed test: ProcessProposal")
|
||||
fmt.Printf("ProcessProposal response status was unexpected. Got %v expected %v.",
|
||||
res.Status, statusExp)
|
||||
return errors.New("ProcessProposal error")
|
||||
}
|
||||
fmt.Println("Passed test: ProcessProposal")
|
||||
return nil
|
||||
}
|
||||
|
||||
func CheckTx(ctx context.Context, client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTx(ctx, &types.RequestCheckTx{Tx: txBytes})
|
||||
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: CheckTx")
|
||||
|
||||
@@ -1,12 +1,10 @@
|
||||
echo hello
|
||||
info
|
||||
prepare_proposal "abc=123"
|
||||
process_proposal "abc=123"
|
||||
finalize_block "abc=123"
|
||||
commit
|
||||
deliver_tx "abc"
|
||||
info
|
||||
commit
|
||||
query "abc"
|
||||
finalize_block "def=xyz" "ghi=123"
|
||||
deliver_tx "def=xyz"
|
||||
commit
|
||||
query "def"
|
||||
|
||||
|
||||
@@ -8,49 +8,42 @@
|
||||
-> data: {"size":0}
|
||||
-> data.hex: 0x7B2273697A65223A307D
|
||||
|
||||
> prepare_proposal "abc=123"
|
||||
-> code: OK
|
||||
-> log: Succeeded. Tx: abc=123
|
||||
|
||||
> process_proposal "abc=123"
|
||||
-> code: OK
|
||||
-> status: ACCEPT
|
||||
|
||||
> finalize_block "abc=123"
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0200000000000000
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0000000000000000
|
||||
|
||||
> deliver_tx "abc"
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"size":1}
|
||||
-> data.hex: 0x7B2273697A65223A317D
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0200000000000000
|
||||
|
||||
> query "abc"
|
||||
-> code: OK
|
||||
-> log: exists
|
||||
-> height: 0
|
||||
-> height: 2
|
||||
-> key: abc
|
||||
-> key.hex: 616263
|
||||
-> value: 123
|
||||
-> value.hex: 313233
|
||||
-> value: abc
|
||||
-> value.hex: 616263
|
||||
|
||||
> finalize_block "def=xyz" "ghi=123"
|
||||
> deliver_tx "def=xyz"
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0600000000000000
|
||||
|
||||
> commit
|
||||
-> code: OK
|
||||
-> data.hex: 0x0400000000000000
|
||||
|
||||
> query "def"
|
||||
-> code: OK
|
||||
-> log: exists
|
||||
-> height: 0
|
||||
-> height: 3
|
||||
-> key: def
|
||||
-> key.hex: 646566
|
||||
-> value: xyz
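The 8-byte data.hex values in this session (0x02..., 0x04..., 0x06... as the tx count grows) are consistent with the tx count being written via binary.PutVarint (zigzag encoding) into an 8-byte buffer. That encoding is an assumption about the example app, but the arithmetic can be checked directly:

package main

import (
	"encoding/binary"
	"encoding/hex"
	"fmt"
)

func main() {
	for _, count := range []int64{1, 2, 3} {
		appHash := make([]byte, 8)
		binary.PutVarint(appHash, count) // zigzag: 1->0x02, 2->0x04, 3->0x06
		fmt.Printf("count %d -> 0x%s\n", count, hex.EncodeToString(appHash))
	}
	// Output:
	// count 1 -> 0x0200000000000000
	// count 2 -> 0x0400000000000000
	// count 3 -> 0x0600000000000000
}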
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
check_tx "abc"
|
||||
check_tx "def=567"
|
||||
finalize_block "def=567"
|
||||
commit
|
||||
finalize_block "hello=world"
|
||||
commit
|
||||
finalize_block "first=second"
|
||||
commit
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
deliver_tx 0x00
|
||||
check_tx 0x00
|
||||
deliver_tx 0x01
|
||||
deliver_tx 0x04
|
||||
info
|
||||
|
||||
@@ -1,35 +1,23 @@
|
||||
> check_tx "abc"
|
||||
-> code: 2
|
||||
|
||||
> check_tx "def=567"
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
> finalize_block "def=567"
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0200000000000000
|
||||
|
||||
> commit
|
||||
> check_tx 0xff
|
||||
-> code: OK
|
||||
|
||||
> finalize_block "hello=world"
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0400000000000000
|
||||
|
||||
> commit
|
||||
> deliver_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
> finalize_block "first=second"
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
-> code: OK
|
||||
-> data.hex: 0x0600000000000000
|
||||
|
||||
> commit
|
||||
> deliver_tx 0x01
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x04
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"size":3}
|
||||
-> data.hex: 0x7B2273697A65223A337D
|
||||
-> data: {"hashes":0,"txs":3}
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
|
||||
|
||||
|
||||
@@ -30,42 +30,13 @@ function testExample() {
|
||||
cat "${INPUT}.out.new"
|
||||
echo "Expected:"
|
||||
cat "${INPUT}.out"
|
||||
echo "Diff:"
|
||||
diff "${INPUT}.out" "${INPUT}.out.new"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm "${INPUT}".out.new
|
||||
}
|
||||
|
||||
function testHelp() {
|
||||
INPUT=$1
|
||||
APP="$2 $3"
|
||||
|
||||
echo "Test: $APP"
|
||||
$APP &> "${INPUT}.new" &
|
||||
sleep 2
|
||||
|
||||
pre=$(shasum < "${INPUT}")
|
||||
post=$(shasum < "${INPUT}.new")
|
||||
|
||||
if [[ "$pre" != "$post" ]]; then
|
||||
echo "You broke the tutorial"
|
||||
echo "Got:"
|
||||
cat "${INPUT}.new"
|
||||
echo "Expected:"
|
||||
cat "${INPUT}"
|
||||
echo "Diff:"
|
||||
diff "${INPUT}" "${INPUT}.new"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm "${INPUT}".new
|
||||
}
|
||||
|
||||
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
|
||||
testExample 2 tests/test_cli/ex2.abci abci-cli kvstore
|
||||
testHelp tests/test_cli/testHelp.out abci-cli help
|
||||
|
||||
echo ""
|
||||
echo "PASS"
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
the ABCI CLI tool wraps an ABCI client and is used for testing ABCI servers
|
||||
|
||||
Usage:
|
||||
abci-cli [command]
|
||||
|
||||
Available Commands:
|
||||
batch run a batch of abci commands against an application
|
||||
check_tx validate a transaction
|
||||
commit commit the application state and return the Merkle root hash
|
||||
completion Generate the autocompletion script for the specified shell
|
||||
console start an interactive ABCI console for multiple commands
|
||||
echo have the application echo a message
|
||||
finalize_block deliver a block of transactions to the application
|
||||
help Help about any command
|
||||
info get some info about the application
|
||||
kvstore ABCI demo example
|
||||
prepare_proposal prepare proposal
|
||||
process_proposal process proposal
|
||||
query query the application state
|
||||
test run integration tests
|
||||
version print ABCI console version
|
||||
|
||||
Flags:
|
||||
--abci string either socket or grpc (default "socket")
|
||||
--address string address of application socket (default "tcp://0.0.0.0:26658")
|
||||
-h, --help help for abci-cli
|
||||
--log_level string set the logger level (default "debug")
|
||||
-v, --verbose print the command and results as if it were a console session
|
||||
|
||||
Use "abci-cli [command] --help" for more information about a command.
|
||||
@@ -1,37 +1,33 @@
|
||||
package types
|
||||
|
||||
import "context"
|
||||
|
||||
//go:generate ../../scripts/mockery_generate.sh Application
|
||||
import (
|
||||
"context"
|
||||
)
|
||||
|
||||
// Application is an interface that enables any finite, deterministic state machine
|
||||
// to be driven by a blockchain-based replication engine via the ABCI.
|
||||
// All methods take a RequestXxx argument and return a ResponseXxx argument,
|
||||
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
|
||||
type Application interface {
|
||||
// Info/Query Connection
|
||||
Info(context.Context, *RequestInfo) (*ResponseInfo, error) // Return application info
|
||||
Query(context.Context, *RequestQuery) (*ResponseQuery, error) // Query for state
|
||||
Info(RequestInfo) ResponseInfo // Return application info
|
||||
Query(RequestQuery) ResponseQuery // Query for state
|
||||
|
||||
// Mempool Connection
|
||||
CheckTx(context.Context, *RequestCheckTx) (*ResponseCheckTx, error) // Validate a tx for the mempool
|
||||
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
|
||||
|
||||
// Consensus Connection
|
||||
InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) // Initialize blockchain w validators/other info from TendermintCore
|
||||
PrepareProposal(context.Context, *RequestPrepareProposal) (*ResponsePrepareProposal, error)
|
||||
ProcessProposal(context.Context, *RequestProcessProposal) (*ResponseProcessProposal, error)
|
||||
// Deliver the decided block with its txs to the Application
|
||||
FinalizeBlock(context.Context, *RequestFinalizeBlock) (*ResponseFinalizeBlock, error)
|
||||
// Create application specific vote extension
|
||||
ExtendVote(context.Context, *RequestExtendVote) (*ResponseExtendVote, error)
|
||||
// Verify application's vote extension data
|
||||
VerifyVoteExtension(context.Context, *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error)
|
||||
// Commit the state and return the application Merkle root hash
|
||||
Commit(context.Context, *RequestCommit) (*ResponseCommit, error)
|
||||
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
|
||||
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
|
||||
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
|
||||
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
|
||||
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
|
||||
|
||||
// State Sync Connection
|
||||
ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) // List available snapshots
|
||||
OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) // Offer a snapshot to the application
|
||||
LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) // Load a snapshot chunk
|
||||
ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) // Apply a snapshot chunk
|
||||
ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots
|
||||
OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot // Offer a snapshot to the application
|
||||
LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk // Load a snapshot chunk
|
||||
ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a snapshot chunk
|
||||
}
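For orientation, the right-hand signatures in this hunk are the pre-context interface, and the usual way to satisfy it is to embed BaseApplication and override only what you need. A minimal sketch against those older signatures (the app name and fields are illustrative; the newer interface instead threads a context.Context and pointer request/response types):

package myapp

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

// CounterApp overrides two methods and inherits no-op defaults for the rest
// by embedding BaseApplication.
type CounterApp struct {
	types.BaseApplication
	txCount int64
}

func (app *CounterApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	app.txCount++
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}

func (app *CounterApp) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: fmt.Sprintf(`{"txs":%d}`, app.txCount)}
}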
|
||||
|
||||
//-------------------------------------------------------
|
||||
@@ -39,81 +35,140 @@ type Application interface {
|
||||
|
||||
var _ Application = (*BaseApplication)(nil)
|
||||
|
||||
type BaseApplication struct{}
|
||||
type BaseApplication struct {
|
||||
}
|
||||
|
||||
func NewBaseApplication() *BaseApplication {
|
||||
return &BaseApplication{}
|
||||
}
|
||||
|
||||
func (BaseApplication) Info(_ context.Context, req *RequestInfo) (*ResponseInfo, error) {
|
||||
return &ResponseInfo{}, nil
|
||||
func (BaseApplication) Info(req RequestInfo) ResponseInfo {
|
||||
return ResponseInfo{}
|
||||
}
|
||||
|
||||
func (BaseApplication) CheckTx(_ context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
|
||||
return &ResponseCheckTx{Code: CodeTypeOK}, nil
|
||||
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
|
||||
return ResponseDeliverTx{Code: CodeTypeOK}
|
||||
}
|
||||
|
||||
func (BaseApplication) Commit(_ context.Context, req *RequestCommit) (*ResponseCommit, error) {
|
||||
return &ResponseCommit{}, nil
|
||||
func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
|
||||
return ResponseCheckTx{Code: CodeTypeOK}
|
||||
}
|
||||
|
||||
func (BaseApplication) Query(_ context.Context, req *RequestQuery) (*ResponseQuery, error) {
|
||||
return &ResponseQuery{Code: CodeTypeOK}, nil
|
||||
func (BaseApplication) Commit() ResponseCommit {
|
||||
return ResponseCommit{}
|
||||
}
|
||||
|
||||
func (BaseApplication) InitChain(_ context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
|
||||
return &ResponseInitChain{}, nil
|
||||
func (BaseApplication) Query(req RequestQuery) ResponseQuery {
|
||||
return ResponseQuery{Code: CodeTypeOK}
|
||||
}
|
||||
|
||||
func (BaseApplication) ListSnapshots(_ context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
|
||||
return &ResponseListSnapshots{}, nil
|
||||
func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
|
||||
return ResponseInitChain{}
|
||||
}
|
||||
|
||||
func (BaseApplication) OfferSnapshot(_ context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
|
||||
return &ResponseOfferSnapshot{}, nil
|
||||
func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
|
||||
return ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
func (BaseApplication) LoadSnapshotChunk(_ context.Context, _ *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
|
||||
return &ResponseLoadSnapshotChunk{}, nil
|
||||
func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
|
||||
return ResponseEndBlock{}
|
||||
}
|
||||
|
||||
func (BaseApplication) ApplySnapshotChunk(_ context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
|
||||
return &ResponseApplySnapshotChunk{}, nil
|
||||
func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
|
||||
return ResponseListSnapshots{}
|
||||
}
|
||||
|
||||
func (BaseApplication) PrepareProposal(_ context.Context, req *RequestPrepareProposal) (*ResponsePrepareProposal, error) {
|
||||
txs := make([][]byte, 0, len(req.Txs))
|
||||
var totalBytes int64
|
||||
for _, tx := range req.Txs {
|
||||
totalBytes += int64(len(tx))
|
||||
if totalBytes > req.MaxTxBytes {
|
||||
break
|
||||
}
|
||||
txs = append(txs, tx)
|
||||
}
|
||||
return &ResponsePrepareProposal{Txs: txs}, nil
|
||||
func (BaseApplication) OfferSnapshot(req RequestOfferSnapshot) ResponseOfferSnapshot {
|
||||
return ResponseOfferSnapshot{}
|
||||
}
|
||||
|
||||
func (BaseApplication) ProcessProposal(_ context.Context, req *RequestProcessProposal) (*ResponseProcessProposal, error) {
|
||||
return &ResponseProcessProposal{Status: ResponseProcessProposal_ACCEPT}, nil
|
||||
func (BaseApplication) LoadSnapshotChunk(req RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk {
|
||||
return ResponseLoadSnapshotChunk{}
|
||||
}
|
||||
|
||||
func (BaseApplication) ExtendVote(_ context.Context, req *RequestExtendVote) (*ResponseExtendVote, error) {
|
||||
return &ResponseExtendVote{}, nil
|
||||
func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) ResponseApplySnapshotChunk {
|
||||
return ResponseApplySnapshotChunk{}
|
||||
}
|
||||
|
||||
func (BaseApplication) VerifyVoteExtension(_ context.Context, req *RequestVerifyVoteExtension) (*ResponseVerifyVoteExtension, error) {
|
||||
return &ResponseVerifyVoteExtension{
|
||||
Status: ResponseVerifyVoteExtension_ACCEPT,
|
||||
}, nil
|
||||
//-------------------------------------------------------
|
||||
|
||||
// GRPCApplication is a GRPC wrapper for Application
|
||||
type GRPCApplication struct {
|
||||
app Application
|
||||
}
|
||||
|
||||
func (BaseApplication) FinalizeBlock(_ context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
|
||||
txs := make([]*ExecTxResult, len(req.Txs))
|
||||
for i := range req.Txs {
|
||||
txs[i] = &ExecTxResult{Code: CodeTypeOK}
|
||||
}
|
||||
return &ResponseFinalizeBlock{
|
||||
TxResults: txs,
|
||||
}, nil
|
||||
func NewGRPCApplication(app Application) *GRPCApplication {
|
||||
return &GRPCApplication{app}
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Echo(ctx context.Context, req *RequestEcho) (*ResponseEcho, error) {
|
||||
return &ResponseEcho{Message: req.Message}, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Flush(ctx context.Context, req *RequestFlush) (*ResponseFlush, error) {
|
||||
return &ResponseFlush{}, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*ResponseInfo, error) {
|
||||
res := app.app.Info(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
|
||||
res := app.app.DeliverTx(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
|
||||
res := app.app.CheckTx(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Query(ctx context.Context, req *RequestQuery) (*ResponseQuery, error) {
|
||||
res := app.app.Query(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) Commit(ctx context.Context, req *RequestCommit) (*ResponseCommit, error) {
|
||||
res := app.app.Commit()
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain) (*ResponseInitChain, error) {
|
||||
res := app.app.InitChain(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
|
||||
res := app.app.BeginBlock(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
|
||||
res := app.app.EndBlock(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ListSnapshots(
|
||||
ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
|
||||
res := app.app.ListSnapshots(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) OfferSnapshot(
|
||||
ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) {
|
||||
res := app.app.OfferSnapshot(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) LoadSnapshotChunk(
|
||||
ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) {
|
||||
res := app.app.LoadSnapshotChunk(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) ApplySnapshotChunk(
|
||||
ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) {
|
||||
res := app.app.ApplySnapshotChunk(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
@@ -3,9 +3,8 @@ package types
|
||||
import (
|
||||
"io"
|
||||
|
||||
"github.com/cosmos/gogoproto/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/protoio"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/tendermint/tendermint/internal/libs/protoio"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -16,7 +15,11 @@ const (
|
||||
func WriteMessage(msg proto.Message, w io.Writer) error {
|
||||
protoWriter := protoio.NewDelimitedWriter(w)
|
||||
_, err := protoWriter.WriteMsg(msg)
|
||||
return err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadMessage reads a varint length-delimited protobuf message.
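WriteMessage and ReadMessage frame each protobuf message with a varint length prefix, which is what lets the socket protocol stream many requests over a single connection. A stdlib-only sketch of the same framing idea, using raw byte payloads instead of protobuf (illustrative, not the protoio implementation):

package main

import (
	"bufio"
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeDelimited prefixes the payload with its uvarint-encoded length.
func writeDelimited(w io.Writer, payload []byte) error {
	var lenBuf [binary.MaxVarintLen64]byte
	n := binary.PutUvarint(lenBuf[:], uint64(len(payload)))
	if _, err := w.Write(lenBuf[:n]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readDelimited reads one uvarint length prefix and then that many bytes.
func readDelimited(r *bufio.Reader) ([]byte, error) {
	size, err := binary.ReadUvarint(r)
	if err != nil {
		return nil, err
	}
	payload := make([]byte, size)
	_, err = io.ReadFull(r, payload)
	return payload, err
}

func main() {
	var buf bytes.Buffer
	_ = writeDelimited(&buf, []byte("echo"))
	_ = writeDelimited(&buf, []byte("flush"))

	r := bufio.NewReader(&buf)
	for {
		msg, err := readDelimited(r)
		if err != nil {
			break // io.EOF once the buffer is drained
		}
		fmt.Println(string(msg))
	}
}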
|
||||
@@ -39,15 +42,21 @@ func ToRequestFlush() *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestInfo(req *RequestInfo) *Request {
|
||||
func ToRequestInfo(req RequestInfo) *Request {
|
||||
return &Request{
|
||||
Value: &Request_Info{req},
|
||||
Value: &Request_Info{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestCheckTx(req *RequestCheckTx) *Request {
|
||||
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
|
||||
return &Request{
|
||||
Value: &Request_CheckTx{req},
|
||||
Value: &Request_DeliverTx{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestCheckTx(req RequestCheckTx) *Request {
|
||||
return &Request{
|
||||
Value: &Request_CheckTx{&req},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,69 +66,51 @@ func ToRequestCommit() *Request {
    }
}

func ToRequestQuery(req *RequestQuery) *Request {
func ToRequestQuery(req RequestQuery) *Request {
    return &Request{
        Value: &Request_Query{req},
        Value: &Request_Query{&req},
    }
}

func ToRequestInitChain(req *RequestInitChain) *Request {
func ToRequestInitChain(req RequestInitChain) *Request {
    return &Request{
        Value: &Request_InitChain{req},
        Value: &Request_InitChain{&req},
    }
}

func ToRequestListSnapshots(req *RequestListSnapshots) *Request {
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
    return &Request{
        Value: &Request_ListSnapshots{req},
        Value: &Request_BeginBlock{&req},
    }
}

func ToRequestOfferSnapshot(req *RequestOfferSnapshot) *Request {
func ToRequestEndBlock(req RequestEndBlock) *Request {
    return &Request{
        Value: &Request_OfferSnapshot{req},
        Value: &Request_EndBlock{&req},
    }
}

func ToRequestLoadSnapshotChunk(req *RequestLoadSnapshotChunk) *Request {
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
    return &Request{
        Value: &Request_LoadSnapshotChunk{req},
        Value: &Request_ListSnapshots{&req},
    }
}

func ToRequestApplySnapshotChunk(req *RequestApplySnapshotChunk) *Request {
func ToRequestOfferSnapshot(req RequestOfferSnapshot) *Request {
    return &Request{
        Value: &Request_ApplySnapshotChunk{req},
        Value: &Request_OfferSnapshot{&req},
    }
}

func ToRequestPrepareProposal(req *RequestPrepareProposal) *Request {
func ToRequestLoadSnapshotChunk(req RequestLoadSnapshotChunk) *Request {
    return &Request{
        Value: &Request_PrepareProposal{req},
        Value: &Request_LoadSnapshotChunk{&req},
    }
}

func ToRequestProcessProposal(req *RequestProcessProposal) *Request {
func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
    return &Request{
        Value: &Request_ProcessProposal{req},
    }
}

func ToRequestExtendVote(req *RequestExtendVote) *Request {
    return &Request{
        Value: &Request_ExtendVote{req},
    }
}

func ToRequestVerifyVoteExtension(req *RequestVerifyVoteExtension) *Request {
    return &Request{
        Value: &Request_VerifyVoteExtension{req},
    }
}

func ToRequestFinalizeBlock(req *RequestFinalizeBlock) *Request {
    return &Request{
        Value: &Request_FinalizeBlock{req},
        Value: &Request_ApplySnapshotChunk{&req},
    }
}

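The ToRequest* helpers on both sides of this hunk do one thing: wrap a concrete request in the Request oneof envelope used on the wire. A usage sketch (not part of the diff), written against the value-argument variant; the pointer-argument variant is wrapped the same way:

package types_test

import (
    "fmt"

    "github.com/tendermint/tendermint/abci/types"
)

func ExampleToRequestCheckTx() {
    req := types.ToRequestCheckTx(types.RequestCheckTx{Tx: []byte{0x01}})
    // The generated oneof getter recovers the concrete request.
    fmt.Println(req.GetCheckTx() != nil)
    // Output: true
}
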
@@ -143,86 +134,73 @@ func ToResponseFlush() *Response {
    }
}

func ToResponseInfo(res *ResponseInfo) *Response {
func ToResponseInfo(res ResponseInfo) *Response {
    return &Response{
        Value: &Response_Info{res},
        Value: &Response_Info{&res},
    }
}
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
    return &Response{
        Value: &Response_DeliverTx{&res},
    }
}

func ToResponseCheckTx(res *ResponseCheckTx) *Response {
func ToResponseCheckTx(res ResponseCheckTx) *Response {
    return &Response{
        Value: &Response_CheckTx{res},
        Value: &Response_CheckTx{&res},
    }
}

func ToResponseCommit(res *ResponseCommit) *Response {
func ToResponseCommit(res ResponseCommit) *Response {
    return &Response{
        Value: &Response_Commit{res},
        Value: &Response_Commit{&res},
    }
}

func ToResponseQuery(res *ResponseQuery) *Response {
func ToResponseQuery(res ResponseQuery) *Response {
    return &Response{
        Value: &Response_Query{res},
        Value: &Response_Query{&res},
    }
}

func ToResponseInitChain(res *ResponseInitChain) *Response {
func ToResponseInitChain(res ResponseInitChain) *Response {
    return &Response{
        Value: &Response_InitChain{res},
        Value: &Response_InitChain{&res},
    }
}

func ToResponseListSnapshots(res *ResponseListSnapshots) *Response {
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
    return &Response{
        Value: &Response_ListSnapshots{res},
        Value: &Response_BeginBlock{&res},
    }
}

func ToResponseOfferSnapshot(res *ResponseOfferSnapshot) *Response {
func ToResponseEndBlock(res ResponseEndBlock) *Response {
    return &Response{
        Value: &Response_OfferSnapshot{res},
        Value: &Response_EndBlock{&res},
    }
}

func ToResponseLoadSnapshotChunk(res *ResponseLoadSnapshotChunk) *Response {
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
    return &Response{
        Value: &Response_LoadSnapshotChunk{res},
        Value: &Response_ListSnapshots{&res},
    }
}

func ToResponseApplySnapshotChunk(res *ResponseApplySnapshotChunk) *Response {
func ToResponseOfferSnapshot(res ResponseOfferSnapshot) *Response {
    return &Response{
        Value: &Response_ApplySnapshotChunk{res},
        Value: &Response_OfferSnapshot{&res},
    }
}

func ToResponsePrepareProposal(res *ResponsePrepareProposal) *Response {
func ToResponseLoadSnapshotChunk(res ResponseLoadSnapshotChunk) *Response {
    return &Response{
        Value: &Response_PrepareProposal{res},
        Value: &Response_LoadSnapshotChunk{&res},
    }
}

func ToResponseProcessProposal(res *ResponseProcessProposal) *Response {
func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
    return &Response{
        Value: &Response_ProcessProposal{res},
    }
}

func ToResponseExtendVote(res *ResponseExtendVote) *Response {
    return &Response{
        Value: &Response_ExtendVote{res},
    }
}

func ToResponseVerifyVoteExtension(res *ResponseVerifyVoteExtension) *Response {
    return &Response{
        Value: &Response_VerifyVoteExtension{res},
    }
}

func ToResponseFinalizeBlock(res *ResponseFinalizeBlock) *Response {
    return &Response{
        Value: &Response_FinalizeBlock{res},
        Value: &Response_ApplySnapshotChunk{&res},
    }
}

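The ToResponse* helpers mirror the request side; unwrapping goes through the generated oneof getters. A short sketch (not part of the diff), again assuming the value-argument variant:

package types_test

import (
    "fmt"

    "github.com/tendermint/tendermint/abci/types"
)

func ExampleToResponseCheckTx() {
    res := types.ToResponseCheckTx(types.ResponseCheckTx{Code: types.CodeTypeOK})
    ct := res.GetCheckTx()
    fmt.Println(ct != nil && ct.Code == types.CodeTypeOK)
    // Output: true
}
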
@@ -6,15 +6,15 @@ import (
    "strings"
    "testing"

    "github.com/cosmos/gogoproto/proto"
    "github.com/gogo/protobuf/proto"
    "github.com/stretchr/testify/assert"

    tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

func TestMarshalJSON(t *testing.T) {
    b, err := json.Marshal(&ExecTxResult{Code: 1})
    assert.NoError(t, err)
    b, err := json.Marshal(&ResponseDeliverTx{})
    assert.Nil(t, err)
    // include empty fields.
    assert.True(t, strings.Contains(string(b), "code"))
    r1 := ResponseCheckTx{

@@ -1,352 +0,0 @@
// Code generated by mockery. DO NOT EDIT.

package mocks

import (
    context "context"

    mock "github.com/stretchr/testify/mock"
    types "github.com/tendermint/tendermint/abci/types"
)

// Application is an autogenerated mock type for the Application type
type Application struct {
    mock.Mock
}

// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Application) ApplySnapshotChunk(_a0 context.Context, _a1 *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseApplySnapshotChunk
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestApplySnapshotChunk) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// CheckTx provides a mock function with given fields: _a0, _a1
func (_m *Application) CheckTx(_a0 context.Context, _a1 *types.RequestCheckTx) (*types.ResponseCheckTx, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseCheckTx
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCheckTx) *types.ResponseCheckTx); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseCheckTx)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCheckTx) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// Commit provides a mock function with given fields: _a0, _a1
func (_m *Application) Commit(_a0 context.Context, _a1 *types.RequestCommit) (*types.ResponseCommit, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseCommit
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestCommit) *types.ResponseCommit); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseCommit)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestCommit) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// ExtendVote provides a mock function with given fields: _a0, _a1
func (_m *Application) ExtendVote(_a0 context.Context, _a1 *types.RequestExtendVote) (*types.ResponseExtendVote, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseExtendVote
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestExtendVote) *types.ResponseExtendVote); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseExtendVote)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestExtendVote) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// FinalizeBlock provides a mock function with given fields: _a0, _a1
func (_m *Application) FinalizeBlock(_a0 context.Context, _a1 *types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseFinalizeBlock
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestFinalizeBlock) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// Info provides a mock function with given fields: _a0, _a1
func (_m *Application) Info(_a0 context.Context, _a1 *types.RequestInfo) (*types.ResponseInfo, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseInfo
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInfo) *types.ResponseInfo); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseInfo)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInfo) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// InitChain provides a mock function with given fields: _a0, _a1
func (_m *Application) InitChain(_a0 context.Context, _a1 *types.RequestInitChain) (*types.ResponseInitChain, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseInitChain
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestInitChain) *types.ResponseInitChain); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseInitChain)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestInitChain) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// ListSnapshots provides a mock function with given fields: _a0, _a1
func (_m *Application) ListSnapshots(_a0 context.Context, _a1 *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseListSnapshots
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseListSnapshots)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestListSnapshots) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
func (_m *Application) LoadSnapshotChunk(_a0 context.Context, _a1 *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseLoadSnapshotChunk
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestLoadSnapshotChunk) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// OfferSnapshot provides a mock function with given fields: _a0, _a1
func (_m *Application) OfferSnapshot(_a0 context.Context, _a1 *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseOfferSnapshot
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestOfferSnapshot) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// PrepareProposal provides a mock function with given fields: _a0, _a1
func (_m *Application) PrepareProposal(_a0 context.Context, _a1 *types.RequestPrepareProposal) (*types.ResponsePrepareProposal, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponsePrepareProposal
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestPrepareProposal) *types.ResponsePrepareProposal); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponsePrepareProposal)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestPrepareProposal) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// ProcessProposal provides a mock function with given fields: _a0, _a1
func (_m *Application) ProcessProposal(_a0 context.Context, _a1 *types.RequestProcessProposal) (*types.ResponseProcessProposal, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseProcessProposal
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestProcessProposal) *types.ResponseProcessProposal); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseProcessProposal)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestProcessProposal) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// Query provides a mock function with given fields: _a0, _a1
func (_m *Application) Query(_a0 context.Context, _a1 *types.RequestQuery) (*types.ResponseQuery, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseQuery
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestQuery) *types.ResponseQuery); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseQuery)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestQuery) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// VerifyVoteExtension provides a mock function with given fields: _a0, _a1
func (_m *Application) VerifyVoteExtension(_a0 context.Context, _a1 *types.RequestVerifyVoteExtension) (*types.ResponseVerifyVoteExtension, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseVerifyVoteExtension
    if rf, ok := ret.Get(0).(func(context.Context, *types.RequestVerifyVoteExtension) *types.ResponseVerifyVoteExtension); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseVerifyVoteExtension)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, *types.RequestVerifyVoteExtension) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

type mockConstructorTestingTNewApplication interface {
    mock.TestingT
    Cleanup(func())
}

// NewApplication creates a new instance of Application. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
func NewApplication(t mockConstructorTestingTNewApplication) *Application {
    mock := &Application{}
    mock.Mock.Test(t)

    t.Cleanup(func() { mock.AssertExpectations(t) })

    return mock
}
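The hunk above removes the mockery-generated mock of the Application interface. For reference, a sketch (not part of the diff) of how such a mock is normally used in a test; the import path is inferred from the package declaration above and the mock API comes from testify:

package mocks_test

import (
    "context"
    "testing"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/abci/types/mocks"
)

func TestApplicationMockSketch(t *testing.T) {
    app := mocks.NewApplication(t) // registered cleanup asserts all expectations were met
    app.On("Info", mock.Anything, mock.Anything).
        Return(&types.ResponseInfo{Data: "demo"}, nil)

    res, err := app.Info(context.Background(), &types.RequestInfo{})
    require.NoError(t, err)
    require.Equal(t, "demo", res.Data)
}
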
@@ -6,6 +6,7 @@ import (
    "github.com/tendermint/tendermint/crypto/ed25519"
    cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
    "github.com/tendermint/tendermint/crypto/secp256k1"
    "github.com/tendermint/tendermint/crypto/sr25519"
)

func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
@@ -17,7 +18,6 @@ func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
    }

    return ValidatorUpdate{
        // Address:
        PubKey: pkp,
        Power:  power,
    }
@@ -34,7 +34,16 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
        panic(err)
    }
    return ValidatorUpdate{
        // Address:
        PubKey: pkp,
        Power:  power,
    }
    case sr25519.KeyType:
        pke := sr25519.PubKey(pk)
        pkp, err := cryptoenc.PubKeyToProto(pke)
        if err != nil {
            panic(err)
        }
        return ValidatorUpdate{
            PubKey: pkp,
            Power:  power,
        }

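A small sketch (not part of the diff) of the validator-update helpers above; the key is a throwaway generated only for illustration:

package types_test

import (
    "fmt"

    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/crypto/ed25519"
)

func ExampleEd25519ValidatorUpdate() {
    pub := ed25519.GenPrivKey().PubKey().Bytes() // throwaway ed25519 public key bytes
    v := types.Ed25519ValidatorUpdate(pub, 10)
    fmt.Println(v.Power)
    // Output: 10
}
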
@@ -4,7 +4,7 @@ import (
    "bytes"
    "encoding/json"

    "github.com/cosmos/gogoproto/jsonpb"
    "github.com/gogo/protobuf/jsonpb"
)

const (
@@ -22,12 +22,12 @@ func (r ResponseCheckTx) IsErr() bool {
}

// IsOK returns true if Code is OK.
func (r ExecTxResult) IsOK() bool {
func (r ResponseDeliverTx) IsOK() bool {
    return r.Code == CodeTypeOK
}

// IsErr returns true if Code is something other than OK.
func (r ExecTxResult) IsErr() bool {
func (r ResponseDeliverTx) IsErr() bool {
    return r.Code != CodeTypeOK
}

@@ -41,20 +41,6 @@ func (r ResponseQuery) IsErr() bool {
    return r.Code != CodeTypeOK
}

// IsAccepted returns true if Code is ACCEPT
func (r ResponseProcessProposal) IsAccepted() bool {
    return r.Status == ResponseProcessProposal_ACCEPT
}

// IsStatusUnknown returns true if Code is UNKNOWN
func (r ResponseProcessProposal) IsStatusUnknown() bool {
    return r.Status == ResponseProcessProposal_UNKNOWN
}

func (r ResponseVerifyVoteExtension) IsAccepted() bool {
    return r.Status == ResponseVerifyVoteExtension_ACCEPT
}

//---------------------------------------------------------------------------
// override JSON marshaling so we emit defaults (ie. disable omitempty)

@@ -76,12 +62,12 @@ func (r *ResponseCheckTx) UnmarshalJSON(b []byte) error {
    return jsonpbUnmarshaller.Unmarshal(reader, r)
}

func (r *ExecTxResult) MarshalJSON() ([]byte, error) {
func (r *ResponseDeliverTx) MarshalJSON() ([]byte, error) {
    s, err := jsonpbMarshaller.MarshalToString(r)
    return []byte(s), err
}

func (r *ExecTxResult) UnmarshalJSON(b []byte) error {
func (r *ResponseDeliverTx) UnmarshalJSON(b []byte) error {
    reader := bytes.NewBuffer(b)
    return jsonpbUnmarshaller.Unmarshal(reader, r)
}
@@ -128,38 +114,7 @@ type jsonRoundTripper interface {

var _ jsonRoundTripper = (*ResponseCommit)(nil)
var _ jsonRoundTripper = (*ResponseQuery)(nil)
var _ jsonRoundTripper = (*ExecTxResult)(nil)
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)

var _ jsonRoundTripper = (*EventAttribute)(nil)

// deterministicExecTxResult constructs a copy of response that omits
// non-deterministic fields. The input response is not modified.
func deterministicExecTxResult(response *ExecTxResult) *ExecTxResult {
    return &ExecTxResult{
        Code:      response.Code,
        Data:      response.Data,
        GasWanted: response.GasWanted,
        GasUsed:   response.GasUsed,
    }
}

// MarshalTxResults encodes the TxResults as a list of byte
// slices. It strips off the non-deterministic pieces of the TxResults
// so that the resulting data can be used for hash comparisons and
// in Merkle proofs.
func MarshalTxResults(r []*ExecTxResult) ([][]byte, error) {
    s := make([][]byte, len(r))
    for i, e := range r {
        d := deterministicExecTxResult(e)
        b, err := d.Marshal()
        if err != nil {
            return nil, err
        }
        s[i] = b
    }
    return s, nil
}

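A usage sketch (not part of the diff) for MarshalTxResults above: only the deterministic fields (Code, Data, GasWanted, GasUsed) survive, so the resulting byte slices can serve as Merkle leaves. merkle.HashFromByteSlices is assumed from the crypto/merkle package:

package types_test

import (
    "fmt"

    "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/crypto/merkle"
)

func ExampleMarshalTxResults() {
    results := []*types.ExecTxResult{
        {Code: types.CodeTypeOK, Data: []byte("ok"), Log: "dropped before hashing"},
    }
    leaves, err := types.MarshalTxResults(results)
    if err != nil {
        panic(err)
    }
    root := merkle.HashFromByteSlices(leaves) // deterministic 32-byte root
    fmt.Println(len(leaves), len(root))
    // Output: 1 32
}
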
// -----------------------------------------------
// construct Result data