Mirror of https://github.com/tendermint/tendermint.git
Synced 2026-01-14 16:52:49 +00:00

Compare commits: docs-stagi...removes-fo (629 commits)
23  .github/CODEOWNERS (vendored)
@@ -1,25 +1,12 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/

# Everything goes through the following "global owners" by default.
# Everything goes through the following "global owners" by default.
# Unless a later match takes precedence, these three will be
# requested for review when someone opens a PR.
# requested for review when someone opens a PR.
# Note that the last matching pattern takes precedence, so
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @alexanderbez @ebuchman @erikgrinaker @melekes @tessr

# Overrides for tooling packages
.github/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
DOCKER/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr

# Overrides for core Tendermint packages
abci/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
evidence/ @cmwaters @ebuchman @melekes @tessr
light/ @cmwaters @melekes @ebuchman @tessr

# Overrides for docs
*.md @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
docs/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr

* @ebuchman @tendermint/tendermint-engineering

/spec @ebuchman @tendermint/tendermint-research @tendermint/tendermint-engineering
11  .github/ISSUE_TEMPLATE/bug-report.md (vendored)
@@ -1,13 +1,18 @@
---
name: Bug Report
name: Bug report
about: Create a report to help us squash bugs!

---
<!--

Please fill in as much of the template below as you can.

Be ready for followup questions, and please respond in a timely
manner. We might ask you to provide additional logs and data (tendermint & app).
If you have general questions, please create a new discussion:
https://github.com/tendermint/tendermint/discussions

Be ready for followup questions, and please respond in a timely manner. We might
ask you to provide additional logs and data (tendermint & app).

-->

**Tendermint version** (use `tendermint version` or `git rev-parse --verify HEAD` if installed from source):
5  .github/ISSUE_TEMPLATE/config.yml (vendored, new file)
@@ -0,0 +1,5 @@
blank_issues_enabled: true
contact_links:
  - name: Ask a question
    url: https://github.com/tendermint/tendermint/discussions
    about: Please ask and answer questions here
11  .github/ISSUE_TEMPLATE/feature-request.md (vendored)
@@ -1,5 +1,5 @@
---
name: Feature Request
name: Feature request
about: Create a proposal to request a feature

---
@@ -25,12 +25,3 @@ Are there any disadvantages of including this feature? -->
## Proposal

<!-- Detailed description of requirements of implementation -->

____

#### For Admin Use

- [ ] Not duplicate issue
- [ ] Appropriate labels applied
- [ ] Appropriate contributors tagged
- [ ] Contributor assigned/self-assigned
28  .github/ISSUE_TEMPLATE/proposal.md (vendored, new file)
@@ -0,0 +1,28 @@
---
name: Protocol change proposal
about: Create a proposal to request a change to the protocol

---

<!-- < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < ☺
v ✰ Thanks for opening an issue! ✰
v Before smashing the submit button please review the template.
v Word of caution: Under-specified proposals may be rejected summarily
☺ > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > -->

# Protocol Change Proposal

## Summary

<!-- Short, concise description of the proposed change -->

## Problem Definition

<!-- Why do we need this change?
What problems may be addressed by introducing this change?
What benefits does Tendermint stand to gain by including this change?
Are there any disadvantages of including this change? -->

## Proposal

<!-- Detailed description of requirements of implementation -->
33  .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -1,7 +1,32 @@
## Description
<!--

_Please add a description of the changes that this PR introduces and the files that
are the most critical to review._
Please add a reference to the issue that this PR addresses and indicate which
files are most critical to review. If it fully addresses a particular issue,
please include "Closes #XXX" (where "XXX" is the issue number).

Closes: #XXX
If this PR is non-trivial/large/complex, please ensure that you have either
created an issue that the team's had a chance to respond to, or had some
discussion with the team prior to submitting substantial pull requests. The team
can be reached via GitHub Discussions or the Cosmos Network Discord server in
the #tendermint-core channel. GitHub Discussions is preferred over Discord as it
allows us to keep track of conversations topically.
https://github.com/tendermint/tendermint/discussions

If the work in this PR is not aligned with the team's current priorities, please
be advised that it may take some time before it is merged - especially if it has
not yet been discussed with the team.

See the project board for the team's current priorities:
https://github.com/orgs/tendermint/projects/15/views/5

-->

---

#### PR checklist

- [ ] Tests written/updated, or no tests needed
- [ ] `CHANGELOG_PENDING.md` updated, or no changelog entry needed
- [ ] Updated relevant documentation (`docs/`) and code comments, or no
      documentation updates needed
16  .github/auto-comment.yml (vendored)
@@ -1,16 +0,0 @@
pullRequestOpened: |
  :wave: Thanks for creating a PR!

  Before we can merge this PR, please make sure that all the following items have been
  checked off. If any of the checklist items are not applicable, please leave them but
  write a little note why.

  - [ ] Wrote tests
  - [ ] Updated CHANGELOG_PENDING.md
  - [ ] Linked to Github issue with discussion and accepted design OR link to spec that describes this work.
  - [ ] Updated relevant documentation (`docs/`) and code comments
  - [ ] Re-reviewed `Files changed` in the Github PR explorer
  - [ ] Applied Appropriate Labels

  Thank you for your contribution to Tendermint! :rocket:
14  .github/codecov.yml (vendored)
@@ -5,19 +5,14 @@ coverage:
  status:
    project:
      default:
        threshold: 1%
        threshold: 20%
    patch: on
    patch: off
    changes: off

github_checks:
  annotations: false

comment:
  layout: "diff, files"
  behavior: default
  require_changes: no
  require_base: no
  require_head: yes
comment: false

ignore:
  - "docs"
@@ -25,3 +20,6 @@ ignore:
  - "scripts"
  - "**/*.pb.go"
  - "libs/pubsub/query/query.peg.go"
  - "*.md"
  - "*.rst"
  - "*.yml"
47  .github/dependabot.yml (vendored)
@@ -3,26 +3,49 @@ updates:
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: daily
      time: "11:00"
      interval: weekly
    target-branch: "main"
    open-pull-requests-limit: 10
    labels:
      - T:dependencies
      - S:automerge

  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: weekly
    target-branch: "v0.34.x"
    open-pull-requests-limit: 10
    labels:
      - T:dependencies
      - S:automerge

  - package-ecosystem: npm
    directory: "/docs"
    schedule:
      interval: daily
      time: "11:00"
      interval: weekly
    open-pull-requests-limit: 10
    reviewers:
      - fadeev

  ###################################
  ##
  ## Update All Go Dependencies

  - package-ecosystem: gomod
    directory: "/"
    schedule:
      interval: daily
      time: "11:00"
      interval: weekly
    target-branch: "main"
    open-pull-requests-limit: 10
    reviewers:
      - melekes
      - tessr
      - erikgrinaker
    labels:
      - T:dependencies
      - S:automerge

  - package-ecosystem: gomod
    directory: "/"
    schedule:
      interval: weekly
    target-branch: "v0.34.x"
    open-pull-requests-limit: 10
    labels:
      - T:dependencies
      - S:automerge
6  .github/issue_template.md (vendored, new file)
@@ -0,0 +1,6 @@
<!--

If you want to ask a general question, please create a new discussion instead of
an issue: https://github.com/tendermint/tendermint/discussions

-->
8  .github/linter/markdownlint.yml (vendored)
@@ -1,8 +0,0 @@
default: true,
MD007: { "indent": 4 }
MD013: false
MD024: { siblings_only: true }
MD025: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false
15  .github/linters/markdownlint.yml (vendored, new file)
@@ -0,0 +1,15 @@
# markdownlint configuration for Super-Linter
# - https://github.com/DavidAnson/markdownlint
# - https://github.com/github/super-linter

# Default state for all rules
default: true

# See https://github.com/DavidAnson/markdownlint#rules--aliases for rules
MD007: {"indent": 4}
MD013: false
MD024: {siblings_only: true}
MD025: false
MD033: {no-inline-html: false}
no-hard-tabs: false
whitespace: false
9  .github/linters/yaml-lint.yml (vendored, new file)
@@ -0,0 +1,9 @@
---
# Default rules for YAML linting from super-linter.
# See: See https://yamllint.readthedocs.io/en/stable/rules.html
extends: default
rules:
  document-end: disable
  document-start: disable
  line-length: disable
  truthy: disable
29  .github/mergify.yml (vendored)
@@ -1,10 +1,27 @@
pull_request_rules:
  - name: Automerge to master
queue_rules:
  - name: default
    conditions:
      - base=master
      - base=main
      - label=S:automerge

pull_request_rules:
  - name: Automerge to main
    conditions:
      - base=main
      - label=S:automerge
    actions:
      merge:
      queue:
        method: squash
        strict: true
        commit_message: title+body
        name: default
        commit_message_template: |
          {{ title }} (#{{ number }})

          {{ body }}
  - name: backport patches to v0.34.x branch
    conditions:
      - base=main
      - label=S:backport-to-v0.34.x
    actions:
      backport:
        branches:
          - v0.34.x
82  .github/workflows/build.yml (vendored, new file)
@@ -0,0 +1,82 @@
name: Build
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to main or release branch and every pull requests
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
on:
  pull_request:
  push:
    branches:
      - main
      - release/**

jobs:
  build:
    name: Build
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        goarch: ["arm", "amd64"]
        goos: ["linux"]
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/*.go
            "!test/"
            go.mod
            go.sum
            Makefile
      - name: install
        run: GOOS=${{ matrix.goos }} GOARCH=${{ matrix.goarch }} make build
        if: "env.GIT_DIFF != ''"

  test_abci_cli:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/*.go
            go.mod
            go.sum
      - name: install
        run: make install_abci
        if: "env.GIT_DIFF != ''"
      - run: abci/tests/test_cli/test.sh
        shell: bash
        if: "env.GIT_DIFF != ''"

  test_apps:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/*.go
            go.mod
            go.sum
      - name: install
        run: make install install_abci
        if: "env.GIT_DIFF != ''"
      - name: test_apps
        run: test/app/test.sh
        shell: bash
        if: "env.GIT_DIFF != ''"
73  .github/workflows/check-generated.yml (vendored, new file)
@@ -0,0 +1,73 @@
# Verify that generated code is up-to-date.
#
# Note that we run these checks regardless whether the input files have
# changed, because generated code can change in response to toolchain updates
# even if no files in the repository are modified.
name: Check generated code
on:
  pull_request:
    branches:
      - main

permissions:
  contents: read

jobs:
  check-mocks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3

      - name: "Check generated mocks"
        run: |
          set -euo pipefail

          make mockery 2>/dev/null

          if ! git diff --stat --exit-code ; then
            echo ">> ERROR:"
            echo ">>"
            echo ">> Generated mocks require update (either Mockery or source files may have changed)."
            echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR."
            echo ">>"
            exit 1
          fi

  check-proto:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3
        with:
          fetch-depth: 1 # we need a .git directory to run git diff

      - name: "Check protobuf generated code"
        run: |
          set -euo pipefail

          # Install buf and gogo tools, so that differences that arise from
          # toolchain differences are also caught.
          readonly tools="$(mktemp -d)"
          export PATH="${PATH}:${tools}/bin"
          export GOBIN="${tools}/bin"

          go install github.com/bufbuild/buf/cmd/buf
          go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest

          make proto-gen

          if ! git diff --stat --exit-code ; then
            echo ">> ERROR:"
            echo ">>"
            echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)."
            echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR."
            echo ">>"
            exit 1
          fi
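The two jobs above follow the same pattern: regenerate the code, then fail if `git diff` reports any changes. A minimal sketch of reproducing that check locally before opening a PR, assuming the repository's `make mockery` and `make proto-gen` targets and an up-to-date local toolchain:

```bash
# Local sketch of the "generated code is up-to-date" check used in CI above.
set -euo pipefail

make mockery 2>/dev/null   # regenerate mocks
make proto-gen             # regenerate protobuf code

# If regeneration changed anything, the working tree is dirty and the check
# fails, just like `git diff --stat --exit-code` does in the workflow.
if ! git diff --stat --exit-code; then
  echo "Generated code is stale; commit the regenerated files." >&2
  exit 1
fi
```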
127  .github/workflows/coverage.yml (vendored)
@@ -1,127 +0,0 @@
name: Test Coverage
on:
  pull_request:
  push:
    branches:
      - master
      - release/**

jobs:
  split-test-files:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Create a file with all the pkgs
        run: go list ./... > pkgs.txt
      - name: Split pkgs into 4 files
        run: split -d -n l/4 pkgs.txt pkgs.txt.part.
      # cache multiple
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-00"
          path: ./pkgs.txt.part.00
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-01"
          path: ./pkgs.txt.part.01
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-02"
          path: ./pkgs.txt.part.02
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-03"
          path: ./pkgs.txt.part.03

  build-linux:
    name: Build
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        goarch: ["arm", "amd64"]
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - name: install
        run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
        if: "env.GIT_DIFF != ''"

  tests:
    runs-on: ubuntu-latest
    needs: split-test-files
    strategy:
      fail-fast: false
      matrix:
        part: ["00", "01", "02", "03"]
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-${{ matrix.part }}"
        if: env.GIT_DIFF
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15
      - name: test & coverage report creation
        run: |
          cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
        if: env.GIT_DIFF
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-${{ matrix.part }}-coverage"
          path: ./${{ matrix.part }}profile.out

  upload-coverage-report:
    runs-on: ubuntu-latest
    needs: tests
    steps:
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-00-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-01-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-02-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-03-coverage"
        if: env.GIT_DIFF
      - run: |
          cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
        if: env.GIT_DIFF
      - uses: codecov/codecov-action@v1.2.1
        with:
          file: ./coverage.txt
        if: env.GIT_DIFF
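The removed workflow above splits the package list into four parts, runs `go test -coverprofile` for each part in a separate matrix job, and then merges the per-part profiles for upload. A minimal local sketch of the same flow, assuming a Go module checkout at the repository root:

```bash
# Split the package list into four parts, as the removed coverage workflow did.
go list ./... > pkgs.txt
split -d -n l/4 pkgs.txt pkgs.txt.part.   # produces pkgs.txt.part.00 .. .03

for part in 00 01 02 03; do
  # Each part gets its own coverage profile, mirroring one CI matrix job.
  xargs go test -mod=readonly -timeout 8m -race \
    -coverprofile="${part}profile.out" -covermode=atomic < "pkgs.txt.part.${part}"
done

# Concatenate the per-part profiles into a single report, as the upload job did.
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
```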
23  .github/workflows/docker.yml (vendored)
@@ -1,20 +1,19 @@
name: Build & Push
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
name: Docker
# Build & Push rebuilds the Tendermint docker image on every push to main and creation of tags
# and pushes the image to https://hub.docker.com/r/tendermint/tendermint
on:
  pull_request:
  push:
    branches:
      - master
      - main
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
      - "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
      - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
      - "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - uses: actions/checkout@v3
      - name: Prepare
        id: prep
        run: |
@@ -39,18 +38,18 @@ jobs:
        with:
          platforms: all

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Set up Docker Build
        uses: docker/setup-buildx-action@v2.0.0

      - name: Login to DockerHub
        if: ${{ github.event_name != 'pull_request' }}
        uses: docker/login-action@v1
        uses: docker/login-action@v2.0.0
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Publish to Docker Hub
        uses: docker/build-push-action@v2
        uses: docker/build-push-action@v3.1.1
        with:
          context: .
          file: ./DOCKER/Dockerfile
62  .github/workflows/docs-deployment.yml (vendored, new file)
@@ -0,0 +1,62 @@
# Build and deploy the docs.tendermint.com website content.
# The static content is published to GitHub Pages.
#
# For documentation build info, see docs/DOCS_README.md.
name: Build static documentation site
on:
  workflow_dispatch: # allow manual updates
  push:
    branches:
      - main
    paths:
      - docs/**
      - spec/**

jobs:
  # This is split into two jobs so that the build, which runs npm, does not
  # have write access to anything. The deploy requires write access to publish
  # to the branch used by GitHub Pages, however, so we can't just make the
  # whole workflow read-only.
  build:
    name: VuePress build
    runs-on: ubuntu-latest
    container:
      image: alpine:latest
    permissions:
      contents: read
    steps:
      - name: Install generator dependencies
        run: |
          apk add --no-cache make bash git npm
      - uses: actions/checkout@v3
        with:
          # We need to fetch full history so the backport branches for previous
          # versions will be available for the build.
          fetch-depth: 0
      - name: Build documentation
        run: |
          git config --global --add safe.directory "$PWD"
          make build-docs
      - uses: actions/upload-artifact@v3
        with:
          name: build-output
          path: ~/output/

  deploy:
    name: Deploy to GitHub Pages
    runs-on: ubuntu-latest
    needs: build
    permissions:
      contents: write
    steps:
      - uses: actions/checkout@v3
      - uses: actions/download-artifact@v3
        with:
          name: build-output
          path: ~/output
      - name: Deploy to GitHub Pages
        uses: JamesIves/github-pages-deploy-action@v4
        with:
          branch: 'docs-tendermint-com'
          folder: ~/output
          single-commit: true
20  .github/workflows/docs-toc.yml (vendored, new file)
@@ -0,0 +1,20 @@
# Verify that important design docs have ToC entries.
name: Check documentation ToC
on:
  pull_request:
  push:
    branches:
      - main

jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            docs/architecture/**
            docs/rfc/**
      - run: make check-docs-toc
        if: env.GIT_DIFF
32  .github/workflows/docs.yml (vendored)
@@ -1,32 +0,0 @@
name: Documentation
# This job builds and deploys documentation to github pages.
# It runs on every push to master, and can be manually triggered.
on:
  workflow_dispatch: # allow running workflow manually
  push:
    branches:
      - master

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    container:
      image: tendermintdev/docker-website-deployment
    steps:
      - name: Checkout 🛎️
        uses: actions/checkout@v2.3.1
        with:
          persist-credentials: false
          fetch-depth: 0

      - name: Install and Build 🔧
        run: |
          apk add rsync
          make build-docs

      - name: Deploy 🚀
        uses: JamesIves/github-pages-deploy-action@3.7.1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          BRANCH: gh-pages
          FOLDER: ~/output
36  .github/workflows/e2e-manual.yml (vendored, new file)
@@ -0,0 +1,36 @@
# Runs randomly generated E2E testnets nightly on main
# manually run e2e tests
name: e2e-manual
on:
  workflow_dispatch:

jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner tests

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 5 -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
104  .github/workflows/e2e-nightly-34x.yml (vendored, new file)
@@ -0,0 +1,104 @@
# Runs randomly generated E2E testnets nightly on the 0.34.x branch.

# !! This file should be kept in sync with the e2e-nightly-main.yml file,
# modulo changes to the version labels.

name: e2e-nightly-34x
on:
  schedule:
    - cron: '0 2 * * *'

jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3
        with:
          ref: 'v0.34.x'

      - name: Capture git repo info
        id: git-info
        run: |
          echo "::set-output name=branch::`git branch --show-current`"
          echo "::set-output name=commit::`git rev-parse HEAD`"

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 2 -d networks/nightly

      - name: Run testnets in group ${{ matrix.group }}
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

    outputs:
      git-branch: ${{ steps.git-info.outputs.branch }}
      git-commit: ${{ steps.git-info.outputs.commit }}

  e2e-nightly-fail:
    needs: e2e-nightly-test
    if: ${{ failure() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ needs.e2e-nightly-test.outputs.git-commit }}"
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
                  }
                }
              ]
            }

  e2e-nightly-success: # may turn this off once they seem to pass consistently
    needs: e2e-nightly-test
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ needs.e2e-nightly-test.outputs.git-branch }}
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":white_check_mark: Nightly E2E tests for `${{ env.BRANCH }}` passed."
                  }
                }
              ]
            }
93  .github/workflows/e2e-nightly-main.yml (vendored, new file)
@@ -0,0 +1,93 @@
# Runs randomly generated E2E testnets nightly on main

# !! Relevant changes to this file should be propagated to the e2e-nightly-<V>x
# files for the supported backport branches, when appropriate, modulo version
# markers.

name: e2e-nightly-main
on:
  schedule:
    - cron: '0 2 * * *'

jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03', "04"]
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner tests

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 5 -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

  e2e-nightly-fail:
    needs: e2e-nightly-test
    if: ${{ failure() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ github.ref_name }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
          COMMIT_URL: "${{ github.server_url }}/${{ github.repository }}/commit/${{ github.sha }}"
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":skull: Nightly E2E tests for `${{ env.BRANCH }}` failed. See the <${{ env.RUN_URL }}|run details> and the <${{ env.COMMIT_URL }}|commit> that caused the failure."
                  }
                }
              ]
            }

  e2e-nightly-success: # may turn this off once they seem to pass consistently
    needs: e2e-nightly-test
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ github.ref_name }}
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":white_check_mark: Nightly E2E tests for `${{ env.BRANCH }}` passed."
                  }
                }
              ]
            }
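The nightly workflows above all share the same three steps: build the test/e2e tooling, generate testnet manifests, and run one group of them. A minimal sketch of reproducing a single nightly group locally, using the same commands as the workflow and assuming Docker is available:

```bash
# Local sketch of one nightly E2E group, mirroring the workflow steps above.
cd test/e2e
make -j2 docker generator runner tests        # build the e2e tooling
./build/generator -g 5 -d networks/nightly/   # generate 5 groups of testnet manifests
./run-multiple.sh networks/nightly/*-group00-*.toml   # run only group 00
```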
57  .github/workflows/e2e-nightly.yml (vendored)
@@ -1,57 +0,0 @@
# Runs randomly generated E2E testnets nightly.
name: e2e-nightly
on:
  workflow_dispatch: # allow running workflow manually
  schedule:
    - cron: '0 2 * * *'

jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03']
        # todo: expand to multiple versions after 0.35 release
        branch: ['master', 'v0.34.x']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: '1.15'

      - uses: actions/checkout@v2
        with:
          ref: ${{ matrix.branch}}

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 4 -d networks/nightly

      - name: Run testnets in group ${{ matrix.group }}
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

  e2e-nightly-fail:
    needs: e2e-nightly-test
    if: ${{ failure() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':skull:'
          SLACK_COLOR: danger
          SLACK_MESSAGE: Nightly E2E tests failed
          SLACK_FOOTER: ''
23  .github/workflows/e2e.yml (vendored)
@@ -1,12 +1,12 @@
name: e2e
# Runs the CI end-to-end test network on all pushes to master or release branches
# Runs the CI end-to-end test network on all pushes to main or release branches
# and every pull request, but only if any Go files have been changed.
on:
  workflow_dispatch: # allow running workflow manually
  workflow_dispatch: # allow running workflow manually
  pull_request:
  push:
    branches:
      - master
      - main
      - release/**

jobs:
@@ -14,11 +14,11 @@ jobs:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: '1.15'
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
          go-version: '1.18'
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
@@ -28,15 +28,10 @@ jobs:
      - name: Build
        working-directory: test/e2e
        # Run two make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker runner
        run: make -j2 docker runner tests
        if: "env.GIT_DIFF != ''"

      - name: Run CI testnet
        working-directory: test/e2e
        run: ./build/runner -f networks/ci.toml
        run: ./run-multiple.sh networks/ci.toml
        if: "env.GIT_DIFF != ''"

      - name: Emit logs on failure
        if: ${{ failure() }}
        working-directory: test/e2e
        run: ./build/runner -f networks/ci.toml logs
65  .github/workflows/fuzz-nightly.yml (vendored)
@@ -1,23 +1,27 @@
# Runs fuzzing nightly.
name: fuzz-nightly
name: Fuzz Tests
on:
  workflow_dispatch: # allow running workflow manually
  workflow_dispatch: # allow running workflow manually
  schedule:
    - cron: '0 3 * * *'
  pull_request:
    branches: [main]
    paths:
      - "test/fuzz/**/*.go"

jobs:
  fuzz-nightly-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: '1.15'
          go-version: '1.18'

      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - name: Install go-fuzz
        working-directory: test/fuzz
        run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
        run: go install github.com/dvyukov/go-fuzz/go-fuzz@latest github.com/dvyukov/go-fuzz/go-fuzz-build@latest

      - name: Fuzz mempool
        working-directory: test/fuzz
@@ -44,26 +48,51 @@ jobs:
        run: timeout -s SIGINT --preserve-status 10m make fuzz-rpc-server
        continue-on-error: true

      - name: Archive crashers
        uses: actions/upload-artifact@v3
        with:
          name: crashers
          path: test/fuzz/**/crashers
          retention-days: 3

      - name: Archive suppressions
        uses: actions/upload-artifact@v3
        with:
          name: suppressions
          path: test/fuzz/**/suppressions
          retention-days: 3

      - name: Set crashers count
        working-directory: test/fuzz
        run: echo "::set-output name=crashers-count::$(find . -type d -name "crashers" | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
        run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
        id: set-crashers-count

    outputs:
      crashers_count: ${{ steps.set-crashers-count.outputs.crashers-count }}
      crashers-count: ${{ steps.set-crashers-count.outputs.count }}

  fuzz-nightly-fail:
    needs: fuzz-nightly-test
    if: ${{ needs.set-crashers-count.outputs.crashers-count != 0 }}
    if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack if any crashers
        uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
      - name: Notify Slack on failure
        uses: slackapi/slack-github-action@v1.21.0
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly Fuzz Tests
          SLACK_ICON_EMOJI: ':firecracker:'
          SLACK_COLOR: danger
          SLACK_MESSAGE: Crashers found in Nightly Fuzz tests
          SLACK_FOOTER: ''
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          BRANCH: ${{ github.ref_name }}
          CRASHERS: ${{ needs.fuzz-nightly-test.outputs.crashers-count }}
          RUN_URL: "${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}"
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":skull: Nightly fuzz tests for `${{ env.BRANCH }}` failed with ${{ env.CRASHERS }} crasher(s). See the <${{ env.RUN_URL }}|run details>."
                  }
                }
              ]
            }
16  .github/workflows/janitor.yml (vendored, new file)
@@ -0,0 +1,16 @@
name: Janitor
# Janitor cleans up previous runs of various workflows
# To add more workflows to cancel visit https://api.github.com/repos/tendermint/tendermint/actions/workflows and find the actions name
on:
  pull_request:

jobs:
  cancel:
    name: "Cancel Previous Runs"
    runs-on: ubuntu-latest
    timeout-minutes: 3
    steps:
      - uses: styfle/cancel-workflow-action@0.10.0
        with:
          workflow_id: 1041851,1401230,2837803
          access_token: ${{ github.token }}
12  .github/workflows/linkchecker.yml (vendored)
@@ -1,12 +0,0 @@
name: Check Markdown links
on:
  schedule:
    - cron: '* */24 * * *'
jobs:
  markdown-link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
        with:
          folder-path: "docs"
29  .github/workflows/lint.yaml (vendored)
@@ -1,29 +0,0 @@
name: Lint
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
on:
  pull_request:
  push:
    branches:
      - master
jobs:
  golangci:
    name: golangci-lint
    runs-on: ubuntu-latest
    timeout-minutes: 8
    steps:
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: golangci/golangci-lint-action@v2.3.0
        with:
          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
          version: v1.31
          args: --timeout 10m
          github-token: ${{ secrets.github_token }}
        if: env.GIT_DIFF
40  .github/workflows/lint.yml (vendored, new file)
@@ -0,0 +1,40 @@
name: Golang Linter
# Lint runs golangci-lint over the entire Tendermint repository.
#
# This workflow is run on every pull request and push to main.
#
# The `golangci` job will pass without running if no *.{go, mod, sum}
# files have been modified.
#
# To run this locally, simply run `make lint` from the root of the repo.

on:
  pull_request:
  push:
    branches:
      - main
jobs:
  golangci:
    name: golangci-lint
    runs-on: ubuntu-latest
    timeout-minutes: 8
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: golangci/golangci-lint-action@v3
        with:
          # Required: the version of golangci-lint is required and
          # must be specified without patch version: we always use the
          # latest patch version.
          version: v1.47.3
          args: --timeout 10m
          github-token: ${{ secrets.github_token }}
        if: env.GIT_DIFF
17
.github/workflows/linter.yml
vendored
17
.github/workflows/linter.yml
vendored
@@ -1,16 +1,17 @@
|
||||
name: Lint
|
||||
name: Markdown Linter
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
paths:
|
||||
- "**.md"
|
||||
- "**.yml"
|
||||
- "**.yaml"
|
||||
pull_request:
|
||||
branches: [master]
|
||||
branches: [main]
|
||||
paths:
|
||||
- "**.md"
|
||||
- "**.yml"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -18,14 +19,14 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout Code
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
- name: Lint Code Base
|
||||
uses: docker://github/super-linter:v3
|
||||
uses: docker://github/super-linter:v4
|
||||
env:
|
||||
LINTER_RULES_PATH: .
|
||||
VALIDATE_ALL_CODEBASE: true
|
||||
DEFAULT_BRANCH: master
|
||||
DEFAULT_BRANCH: main
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
VALIDATE_MD: true
|
||||
VALIDATE_OPAENAPI: true
|
||||
VALIDATE_OPENAPI: true
|
||||
VALIDATE_YAML: true
|
||||
YAML_CONFIG_FILE: yaml-lint.yml
|
||||
|
||||
23
.github/workflows/markdown-links.yml
vendored
Normal file
23
.github/workflows/markdown-links.yml
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
name: Check Markdown links
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
pull_request:
|
||||
branches: [main]
|
||||
|
||||
jobs:
|
||||
markdown-link-check:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.md
|
||||
- uses: creachadair/github-action-markdown-link-check@master
|
||||
with:
|
||||
check-modified-files-only: 'yes'
|
||||
config-file: '.md-link-check.json'
|
||||
if: env.GIT_DIFF
|
||||
51
.github/workflows/proto-docker.yml
vendored
51
.github/workflows/proto-docker.yml
vendored
@@ -1,51 +0,0 @@
|
||||
name: Build & Push TM Proto Builder
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "tools/proto/*"
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "tools/proto/*"
|
||||
schedule:
|
||||
# run this job once a month to recieve any go or buf updates
|
||||
- cron: "* * 1 * *"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- name: Prepare
|
||||
id: prep
|
||||
run: |
|
||||
DOCKER_IMAGE=tendermintdev/docker-build-proto
|
||||
VERSION=noop
|
||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
||||
VERSION=${GITHUB_REF#refs/tags/}
|
||||
elif [[ $GITHUB_REF == refs/heads/* ]]; then
|
||||
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
|
||||
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
|
||||
VERSION=latest
|
||||
fi
|
||||
fi
|
||||
TAGS="${DOCKER_IMAGE}:${VERSION}"
|
||||
echo ::set-output name=tags::${TAGS}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Publish to Docker Hub
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: ./tools/proto
|
||||
file: ./tools/proto/Dockerfile
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
tags: ${{ steps.prep.outputs.tags }}
|
||||
21
.github/workflows/proto-lint.yml
vendored
Normal file
21
.github/workflows/proto-lint.yml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
name: Protobuf Lint
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- 'proto/**'
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
paths:
|
||||
- 'proto/**'
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- uses: bufbuild/buf-setup-action@v1.7.0
|
||||
- uses: bufbuild/buf-lint-action@v1
|
||||
with:
|
||||
input: 'proto'
|
||||
23
.github/workflows/proto.yml
vendored
23
.github/workflows/proto.yml
vendored
@@ -1,23 +0,0 @@
|
||||
name: Protobuf
|
||||
# Protobuf runs buf (https://buf.build/) lint and check-breakage
|
||||
# This workflow is only run when a .proto file has been modified
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
pull_request:
|
||||
paths:
|
||||
- "**.proto"
|
||||
jobs:
|
||||
proto-lint:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 4
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- name: lint
|
||||
run: make proto-lint
|
||||
proto-breakage:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 4
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- name: check-breakage
|
||||
run: make proto-check-breaking-ci
|
||||
19
.github/workflows/release.yml
vendored
19
.github/workflows/release.yml
vendored
@@ -2,36 +2,33 @@ name: "Release"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "RC[0-9]/**"
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
|
||||
jobs:
|
||||
goreleaser:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: '1.15'
|
||||
|
||||
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
go-version: '1.18'
|
||||
|
||||
- name: Build
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
uses: goreleaser/goreleaser-action@v3
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
with:
|
||||
version: latest
|
||||
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
|
||||
|
||||
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
|
||||
|
||||
- name: Release
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
uses: goreleaser/goreleaser-action@v3
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
with:
|
||||
version: latest
|
||||
|
||||
8
.github/workflows/stale.yml
vendored
8
.github/workflows/stale.yml
vendored
@@ -7,12 +7,14 @@ jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@v3
|
||||
- uses: actions/stale@v5
|
||||
with:
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
|
||||
recent activity. It will be closed if no further activity occurs. Thank you
|
||||
for your contributions."
|
||||
days-before-stale: 10
|
||||
days-before-close: 4
|
||||
days-before-stale: -1
|
||||
days-before-close: -1
|
||||
days-before-pr-stale: 10
|
||||
days-before-pr-close: 4
|
||||
exempt-pr-labels: "S:wip"
|
||||
|
||||
148
.github/workflows/tests.yml
vendored
148
.github/workflows/tests.yml
vendored
@@ -1,146 +1,34 @@
|
||||
name: Tests
|
||||
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
|
||||
# This workflow runs on every push to master or release branch and every pull requests
|
||||
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
|
||||
name: Test
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
paths:
|
||||
- "**.go"
|
||||
branches:
|
||||
- master
|
||||
- main
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
cleanup-runs:
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
part: ["00", "01", "02", "03", "04", "05"]
|
||||
steps:
|
||||
- uses: rokroskar/workflow-run-cleanup-action@master
|
||||
env:
|
||||
GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
|
||||
if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"
|
||||
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
- uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
go-version: "1.18"
|
||||
- uses: actions/checkout@v3
|
||||
- uses: technote-space/get-diff-action@v6
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
"!test/"
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: env.GIT_DIFF
|
||||
# Cache binaries for use by other jobs
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_abci_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: env.GIT_DIFF
|
||||
- name: test_abci_apps
|
||||
run: abci/tests/test_app/test.sh
|
||||
shell: bash
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_abci_cli:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: env.GIT_DIFF
|
||||
- run: abci/tests/test_cli/test.sh
|
||||
shell: bash
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: env.GIT_DIFF
|
||||
- name: test_apps
|
||||
run: test/app/test.sh
|
||||
shell: bash
|
||||
Makefile
|
||||
- name: Run Go Tests
|
||||
run: |
|
||||
make test-group-${{ matrix.part }} NUM_SPLIT=6
|
||||
if: env.GIT_DIFF
|
||||
|
||||
14
.gitignore
vendored
14
.gitignore
vendored
@@ -15,7 +15,7 @@
|
||||
.vagrant
|
||||
.vendor-new/
|
||||
.vscode/
|
||||
abci-cli
|
||||
abci/abci-cli
|
||||
addrbook.json
|
||||
artifacts/*
|
||||
build/*
|
||||
@@ -24,6 +24,8 @@ docs/.vuepress/dist
|
||||
docs/_build
|
||||
docs/dist
|
||||
docs/node_modules/
|
||||
docs/spec
|
||||
docs/.vuepress/public/rpc
|
||||
index.html.md
|
||||
libs/pubsub/query/fuzz_test/output
|
||||
profile\.out
|
||||
@@ -35,13 +37,21 @@ shunit2
|
||||
terraform.tfstate
|
||||
terraform.tfstate.backup
|
||||
terraform.tfstate.d
|
||||
test/app/grpc_client
|
||||
test/e2e/build
|
||||
test/e2e/networks/*/
|
||||
test/logs
|
||||
test/maverick/maverick
|
||||
test/p2p/data/
|
||||
vendor
|
||||
test/fuzz/**/corpus
|
||||
test/fuzz/**/crashers
|
||||
test/fuzz/**/suppressions
|
||||
test/fuzz/**/*.zip
|
||||
proto/spec/**/*.pb.go
|
||||
*.aux
|
||||
*.bbl
|
||||
*.blg
|
||||
*.log
|
||||
*.pdf
|
||||
*.gz
|
||||
*.dvi
|
||||
|
||||
@@ -1,61 +1,46 @@
|
||||
linters:
|
||||
enable:
|
||||
- asciicheck
|
||||
- bodyclose
|
||||
- deadcode
|
||||
- depguard
|
||||
- dogsled
|
||||
- dupl
|
||||
- errcheck
|
||||
# - funlen
|
||||
# - gochecknoglobals
|
||||
# - gochecknoinits
|
||||
- exportloopref
|
||||
- goconst
|
||||
- gocritic
|
||||
# - gocyclo
|
||||
# - godox
|
||||
- gofmt
|
||||
- goimports
|
||||
- golint
|
||||
- revive
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- ineffassign
|
||||
# - interfacer
|
||||
- lll
|
||||
- misspell
|
||||
# - maligned
|
||||
- nakedret
|
||||
- nolintlint
|
||||
- prealloc
|
||||
- scopelint
|
||||
- staticcheck
|
||||
- structcheck
|
||||
# - structcheck // to be fixed by golangci-lint
|
||||
- stylecheck
|
||||
- typecheck
|
||||
- unconvert
|
||||
# - unparam
|
||||
- unused
|
||||
- varcheck
|
||||
# - whitespace
|
||||
# - wsl
|
||||
# - gocognit
|
||||
- nolintlint
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- gosec
|
||||
- linters:
|
||||
- lll
|
||||
source: "https://"
|
||||
max-same-issues: 50
|
||||
|
||||
linters-settings:
|
||||
dogsled:
|
||||
max-blank-identifiers: 3
|
||||
maligned:
|
||||
suggest-new: true
|
||||
# govet:
|
||||
# check-shadowing: true
|
||||
golint:
|
||||
min-confidence: 0
|
||||
maligned:
|
||||
suggest-new: true
|
||||
misspell:
|
||||
locale: US
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
project_name: Tendermint
|
||||
project_name: tendermint
|
||||
|
||||
env:
|
||||
# Require use of Go modules.
|
||||
@@ -29,8 +29,8 @@ release:
|
||||
|
||||
archives:
|
||||
- files:
|
||||
- LICENSE
|
||||
- README.md
|
||||
- UPGRADING.md
|
||||
- SECURITY.md
|
||||
- CHANGELOG.md
|
||||
- LICENSE
|
||||
- README.md
|
||||
- UPGRADING.md
|
||||
- SECURITY.md
|
||||
- CHANGELOG.md
|
||||
|
||||
17
.markdownlint.yml
Normal file
17
.markdownlint.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
# markdownlint configuration
|
||||
# https://github.com/DavidAnson/markdownlint
|
||||
|
||||
# Default state for all rules
|
||||
default: true
|
||||
|
||||
# See https://github.com/DavidAnson/markdownlint#rules--aliases for rules
|
||||
MD001: false
|
||||
MD007: {indent: 4}
|
||||
MD013: false
|
||||
MD024: {siblings_only: true}
|
||||
MD025: false
|
||||
MD033: false
|
||||
MD036: false
|
||||
MD010: false
|
||||
MD012: false
|
||||
MD028: false
|
||||
6
.md-link-check.json
Normal file
6
.md-link-check.json
Normal file
@@ -0,0 +1,6 @@
|
||||
{
|
||||
"retryOn429": true,
|
||||
"retryCount": 5,
|
||||
"fallbackRetryDelay": "30s",
|
||||
"aliveStatusCodes": [200, 206, 503]
|
||||
}
|
||||
406
CHANGELOG.md
406
CHANGELOG.md
@@ -1,21 +1,277 @@
|
||||
# Changelog
|
||||
|
||||
## v0.34.3
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
|
||||
|
||||
*January 19, 2021*
|
||||
## v0.34.20
|
||||
|
||||
This release includes a fix for a high-severity security vulnerability.
|
||||
More information on this vulnerability will be released on January 26, 2021
|
||||
and this changelog will be updated.
|
||||
Special thanks to external contributors on this release: @joeabbey @yihuang
|
||||
|
||||
It also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
|
||||
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
||||
This release introduces a prioritized mempool. Further notes can be found in UPGRADING.md.
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
NOTE: There's a known issue when combining the prioritized mempool with the ABCI socket client, that the team are curently working to resolve. Read more about the issue [here](https://github.com/tendermint/tendermint/pull/9030).
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [evidence] [N/A] Use correct source of evidence time (@cmwaters)
|
||||
- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
|
||||
- [indexer] [#8625](https://github.com/tendermint/tendermint/pull/8625) Fix overriding tx index of duplicated txs. (@yihuang)
|
||||
- [mempool] [\#8962](https://github.com/tendermint/tendermint/issues/8962) Backport priority mempool fixes from v0.35.x to v0.34.x (@creachadair).
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [cli] [\#8674] Add command to force compact goleveldb databases (@cmwaters)
|
||||
- [mempool] [\#8695] Port back the priority mempool. (@alexanderbez, @jmalicevic, @cmwaters)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [logging] [\#8845](https://github.com/tendermint/tendermint/issues/8845) Add "Lazy" Stringers to defer Sprintf and Hash until logs print. (@joeabbey)
|
||||
|
||||
## v0.34.19
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).
|
||||
|
||||
## v0.34.18
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic
|
||||
|
||||
## v0.34.17
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
|
||||
- [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
|
||||
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).
|
||||
|
||||
## v0.34.16
|
||||
|
||||
Special thanks to external contributors on this release: @yihuang
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [consensus] [\#7617](https://github.com/tendermint/tendermint/issues/7617) calculate prevote message delay metric (backport #7551) (@williambanfield).
|
||||
- [consensus] [\#7631](https://github.com/tendermint/tendermint/issues/7631) check proposal non-nil in prevote message delay metric (backport #7625) (@williambanfield).
|
||||
- [statesync] [\#7885](https://github.com/tendermint/tendermint/issues/7885) statesync: assert app version matches (backport #7856) (@cmwaters).
|
||||
- [statesync] [\#7881](https://github.com/tendermint/tendermint/issues/7881) fix app hash in state rollback (backport #7837) (@cmwaters).
|
||||
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang).
|
||||
|
||||
## v0.34.15
|
||||
|
||||
Special thanks to external contributors on this release: @thanethomson
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [\#7368](https://github.com/tendermint/tendermint/issues/7368) cmd: add integration test for rollback functionality (@cmwaters).
|
||||
- [\#7309](https://github.com/tendermint/tendermint/issues/7309) pubsub: Report a non-nil error when shutting down (fixes #7306).
|
||||
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
|
||||
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [config] [\#7230](https://github.com/tendermint/tendermint/issues/7230) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).
|
||||
|
||||
## v0.34.14
|
||||
|
||||
This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in suppport for running the end-to-end test application (with state sync support) (@cmwaters).
|
||||
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state. This may be useful in the event of non-determinstic app hash or when reverting an upgrade. @cmwaters
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
|
||||
- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).
|
||||
|
||||
## v0.34.13
|
||||
|
||||
This release backports improvements to state synchronization and ABCI
|
||||
performance under concurrent load, and the PostgreSQL event indexer.
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters)
|
||||
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish)
|
||||
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair)
|
||||
|
||||
## v0.34.12
|
||||
|
||||
Special thanks to external contributors on this release: @JayT106.
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
|
||||
`/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
|
||||
with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
|
||||
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
|
||||
|
||||
## v0.34.11
|
||||
|
||||
*June 18, 2021*
|
||||
|
||||
This release improves the robustness of statesync; tweaking channel priorities and timeouts and
|
||||
adding two new parameters to the state sync config.
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Apps
|
||||
- [Version] [\#6494](https://github.com/tendermint/tendermint/issues/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [statesync] [\#6566](https://github.com/tendermint/tendermint/issues/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
|
||||
- [statesync] [\#6378](https://github.com/tendermint/tendermint/issues/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
|
||||
- [statesync] [\#6582](https://github.com/tendermint/tendermint/issues/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [evidence] [\#6375](https://github.com/tendermint/tendermint/issues/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
|
||||
|
||||
## v0.34.10
|
||||
|
||||
*April 14, 2021*
|
||||
|
||||
This release fixes a bug where peers would sometimes try to send messages
|
||||
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
|
||||
this issue!
|
||||
|
||||
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
|
||||
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
|
||||
|
||||
## v0.34.9
|
||||
|
||||
*April 8, 2021*
|
||||
|
||||
This release fixes a moderate severity security issue, Security Advisory Alderfly,
|
||||
which impacts all networks that rely on Tendermint light clients.
|
||||
Further details will be released once networks have upgraded.
|
||||
|
||||
This release also includes a small Go API-breaking change, to reduce panics in the RPC layer.
|
||||
|
||||
Special thanks to our external contributors on this release: @gchaincl
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Go API
|
||||
- [rpc/jsonrpc/server] [\#6204](https://github.com/tendermint/tendermint/issues/6204) Modify `WriteRPCResponseHTTP(Error)` to return an error (@melekes)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [rpc] [\#6226](https://github.com/tendermint/tendermint/issues/6226) Index block events and expose a new RPC method, `/block_search`, to allow querying for blocks by `BeginBlock` and `EndBlock` events (@alexanderbez)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [rpc/jsonrpc/server] [\#6191](https://github.com/tendermint/tendermint/issues/6191) Correctly unmarshal `RPCRequest` when data is `null` (@melekes)
|
||||
- [p2p] [\#6289](https://github.com/tendermint/tendermint/issues/6289) Fix "unknown channels" bug on CustomReactors (@gchaincl)
|
||||
- [light/evidence] Adds logic to handle forward lunatic attacks (@cmwaters)
|
||||
|
||||
## v0.34.8
|
||||
|
||||
*February 25, 2021*
|
||||
|
||||
This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
|
||||
introduces changes that should mean the logs are much, much quieter. 🎉
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [abci] [\#6124](https://github.com/tendermint/tendermint/issues/6124) Fixes a panic condition during callback execution in `ReCheckTx` during high tx load. (@alexanderbez)
|
||||
|
||||
## v0.34.7
|
||||
|
||||
*February 18, 2021*
|
||||
|
||||
This release fixes a downstream security issue which impacts Cosmos SDK
|
||||
users who are:
|
||||
|
||||
* Using Cosmos SDK v0.40.0 or later, AND
|
||||
* Running validator nodes, AND
|
||||
* Using the file-based `FilePV` implementation for their consensus keys
|
||||
|
||||
Users who fulfill all the above criteria were susceptible to leaking
|
||||
private key material in the logs. All other users are unaffected.
|
||||
|
||||
The root cause was a discrepancy
|
||||
between the Tendermint Core (untyped) logger and the Cosmos SDK (typed) logger:
|
||||
Tendermint Core's logger automatically stringifies Go interfaces whenever possible;
|
||||
however, the Cosmos SDK's logger uses reflection to log the fields within a Go interface.
|
||||
|
||||
The introduction of the typed logger meant that previously un-logged fields within
|
||||
interfaces are now sometimes logged, including the private key material inside the
|
||||
`FilePV` struct.
|
||||
|
||||
Tendermint Core v0.34.7 fixes this issue; however, we strongly recommend that all validators
|
||||
use remote signer implementations instead of `FilePV` in production.
|
||||
|
||||
Thank you to @joe-bowman for his assistance with this vulnerability and a particular
|
||||
shout-out to @marbar3778 for diagnosing it quickly.
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)
|
||||
|
||||
## v0.34.6
|
||||
|
||||
*February 18, 2021*
|
||||
|
||||
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to build tooling problems._
|
||||
|
||||
## v0.34.4
|
||||
|
||||
*February 11, 2021*
|
||||
|
||||
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
|
||||
All Tendermint clients are recommended to upgrade.
|
||||
Thank you to our friends at Crypto.com for the initial report of this memory leak!
|
||||
|
||||
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
|
||||
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
|
||||
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
|
||||
|
||||
|
||||
## v0.34.3
|
||||
|
||||
*January 19, 2021*
|
||||
|
||||
This release includes a fix for a high-severity security vulnerability,
|
||||
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
|
||||
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
|
||||
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
|
||||
|
||||
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
|
||||
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)
|
||||
- [proto] [\#5886](https://github.com/tendermint/tendermint/pull/5889) Bump gogoproto to 1.3.2 (@marbar3778)
|
||||
|
||||
## v0.34.2
|
||||
@@ -26,8 +282,6 @@ This release fixes a substantial bug in evidence handling where evidence could
|
||||
sometimes be broadcast before the block containing that evidence was fully committed,
|
||||
resulting in some nodes panicking when trying to verify said evidence.
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Go API
|
||||
@@ -51,8 +305,6 @@ disconnecting from this node. As a temporary remedy (until the mempool package
|
||||
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
|
||||
one by one without batching.
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
@@ -81,8 +333,6 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
|
||||
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
|
||||
@joe-bowman, @cuonglm, @SadPencil and @dongsam.
|
||||
|
||||
And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
@@ -109,14 +359,14 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
|
||||
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
|
||||
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
|
||||
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778)
|
||||
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
|
||||
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
|
||||
- `MaxBatchBytes` new config setting defines the max size of one batch.
|
||||
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
|
||||
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
|
||||
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
|
||||
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supercedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
|
||||
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
|
||||
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
|
||||
@@ -175,7 +425,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
|
||||
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
|
||||
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
|
||||
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
|
||||
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
|
||||
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
|
||||
@@ -213,7 +463,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
|
||||
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
|
||||
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
|
||||
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
|
||||
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
|
||||
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
|
||||
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
|
||||
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
|
||||
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
|
||||
@@ -323,9 +573,6 @@ as 2/3+ of the signatures are checked._
|
||||
|
||||
Special thanks to @njmurarka at Bluzelle Networks for reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)
|
||||
@@ -341,8 +588,6 @@ need to update your code.**
|
||||
|
||||
Special thanks to external contributors on this release: @tau3,
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -402,8 +647,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
|
||||
Special thanks to external contributors on this release: @whylee259, @greg-szabo
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -490,9 +733,6 @@ Notes:
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
|
||||
and reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
|
||||
@@ -505,8 +745,6 @@ program](https://hackerone.com/tendermint).
|
||||
Special thanks to external contributors on this release:
|
||||
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- CLI/RPC/Config
|
||||
@@ -557,9 +795,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
Special thanks to external contributors on this release:
|
||||
@princesinha19
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing to change default ordering from asc to desc (@princesinha19)
|
||||
@@ -578,9 +813,6 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program.](https://hackerone.com/tendermint).
|
||||
|
||||
*January 14, 2020*
|
||||
|
||||
This release contains breaking changes to the `Block#Header`, specifically
|
||||
@@ -809,9 +1041,6 @@ Notes:
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
|
||||
and reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
|
||||
@@ -823,9 +1052,6 @@ _January, 9, 2020_
|
||||
|
||||
Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)
|
||||
@@ -847,9 +1073,6 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program.](https://hackerone.com/tendermint).
|
||||
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -889,9 +1112,6 @@ identified and fixed here.
|
||||
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
|
||||
and reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -918,9 +1138,6 @@ accepting new peers and only allowing `ed25519` pubkeys.
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
|
||||
this out.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting
|
||||
@@ -936,9 +1153,6 @@ All clients are recommended to upgrade. See
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
|
||||
and reporting this issue.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer
|
||||
@@ -949,9 +1163,6 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- CLI/RPC/Config
|
||||
@@ -987,9 +1198,6 @@ guide.
|
||||
Special thanks to external contributors on this release:
|
||||
@gchaincl, @bluele, @climber73
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)
|
||||
@@ -1010,9 +1218,6 @@ program](https://hackerone.com/tendermint).
|
||||
Special thanks to external contributors on this release:
|
||||
@ruseinov, @bluele, @guagualvcha
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -1052,9 +1257,6 @@ This release contains a minor enhancement to the ABCI and some breaking changes
|
||||
- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
|
||||
- Removed various functions from `libs` pkgs
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -1081,7 +1283,7 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
- [p2p] [\#3338](https://github.com/tendermint/tendermint/issues/3338) Prevent "sent next PEX request too soon" errors by not calling
|
||||
ensurePeers outside of ensurePeersRoutine
|
||||
- [behaviour] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
|
||||
- [behavior] [\3772](https://github.com/tendermint/tendermint/pull/3772) Return correct reason in MessageOutOfOrder (@jim380)
|
||||
- [config] [\#3723](https://github.com/tendermint/tendermint/issues/3723) Add consensus_params to testnet config generation; document time_iota_ms (@ashleyvega)
|
||||
|
||||
|
||||
@@ -1100,9 +1302,6 @@ and the RPC, namely:
|
||||
[docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
|
||||
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
@@ -1203,9 +1402,6 @@ Notes:
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
|
||||
and reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
|
||||
@@ -1225,9 +1421,6 @@ identified and fixed here.
|
||||
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
|
||||
and reporting this.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Go API
|
||||
@@ -1254,9 +1447,6 @@ accepting new peers and only allowing `ed25519` pubkeys.
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
|
||||
this out.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting
|
||||
@@ -1272,9 +1462,6 @@ All clients are recommended to upgrade. See
|
||||
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
|
||||
and reporting this issue.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### SECURITY:
|
||||
|
||||
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer
|
||||
@@ -1416,7 +1603,7 @@ It brings back `NetAddress()` to `NodeInfo` and uses it instead of `SocketAddr`
|
||||
Additionally, it improves response time on the `/validators` or `/status` RPC endpoints.
|
||||
As a side-effect it makes these RPC endpoint more difficult to DoS and fixes a performance degradation in `ExecCommitBlock`.
|
||||
Also, it contains an [ADR](https://github.com/tendermint/tendermint/pull/3539) that proposes decoupling the
|
||||
responsibility for peer behaviour from the `p2p.Switch` (by @brapse).
|
||||
responsibility for peer behavior from the `p2p.Switch` (by @brapse).
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
@brapse, @guagualvcha, @mydring
|
||||
@@ -1569,9 +1756,6 @@ See the [v0.31.0
|
||||
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
|
||||
more details.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
@@ -1582,7 +1766,7 @@ program](https://hackerone.com/tendermint).
|
||||
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
|
||||
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
|
||||
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
|
||||
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
|
||||
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
|
||||
|
||||
* Apps
|
||||
- [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
|
||||
@@ -1635,7 +1819,7 @@ program](https://hackerone.com/tendermint).
|
||||
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
|
||||
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
|
||||
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
|
||||
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
|
||||
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-033-pubsub.md)
|
||||
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
|
||||
(@guagualvcha)
|
||||
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
|
||||
@@ -1702,7 +1886,7 @@ For more, see issues marked
|
||||
|
||||
This release also includes a fix to prevent Tendermint from including the same
|
||||
piece of evidence in more than one block. This issue was reported by @chengwenxi in our
|
||||
[bug bounty program](https://hackerone.com/tendermint).
|
||||
[bug bounty program](https://hackerone.com/cosmos).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
@@ -1791,9 +1975,6 @@ This release contains two important fixes: one for p2p layer where we sometimes
|
||||
were not closing connections and one for consensus layer where consensus with
|
||||
no empty blocks (`create_empty_blocks = false`) could halt.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### IMPROVEMENTS:
|
||||
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once
|
||||
- [rpc] [\#3159](https://github.com/tendermint/tendermint/issues/3159) Expose
|
||||
@@ -1832,9 +2013,6 @@ While we are trying to stabilize the Block protocol to preserve compatibility
|
||||
with old chains, there may be some final changes yet to come before Cosmos
|
||||
launch as we continue to audit and test the software.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* CLI/RPC/Config
|
||||
@@ -1882,9 +2060,6 @@ program](https://hackerone.com/tendermint).
|
||||
Special thanks to external contributors on this release:
|
||||
@HaoyangLiu
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BUG FIXES:
|
||||
- [consensus] Fix consensus halt from proposing blocks with too much evidence
|
||||
|
||||
@@ -2012,9 +2187,6 @@ Special thanks to @dlguddus for discovering a [major
|
||||
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
|
||||
in the proposer selection algorithm.
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
This release is primarily about fixes to the proposer selection algorithm
|
||||
in preparation for the [Cosmos Game of
|
||||
Stakes](https://blog.cosmos.network/the-game-of-stakes-is-open-for-registration-83a404746ee6).
|
||||
@@ -2076,9 +2248,6 @@ Special thanks to external contributors on this release:
|
||||
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
|
||||
@nagarajmanjunath, @tomtau
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [rpc] [\#2747](https://github.com/tendermint/tendermint/issues/2747) Enable subscription to tags emitted from `BeginBlock`/`EndBlock` (@kostko)
|
||||
@@ -2102,8 +2271,8 @@ program](https://hackerone.com/tendermint).
|
||||
- [blockchain] [\#2731](https://github.com/tendermint/tendermint/issues/2731) Retry both blocks if either is bad to avoid getting stuck during fast sync (@goolAdapter)
|
||||
- [consensus] [\#2893](https://github.com/tendermint/tendermint/issues/2893) Use genDoc.Validators instead of state.NextValidators on replay when appHeight==0 (@james-ray)
|
||||
- [log] [\#2868](https://github.com/tendermint/tendermint/issues/2868) Fix `module=main` setting overriding all others
|
||||
- NOTE: this changes the default logging behaviour to be much less verbose.
|
||||
Set `log_level="info"` to restore the previous behaviour.
|
||||
- NOTE: this changes the default logging behavior to be much less verbose.
|
||||
Set `log_level="info"` to restore the previous behavior.
|
||||
- [rpc] [\#2808](https://github.com/tendermint/tendermint/issues/2808) Fix `accum` field in `/validators` by calling `IncrementAccum` if necessary
|
||||
- [rpc] [\#2811](https://github.com/tendermint/tendermint/issues/2811) Allow integer IDs in JSON-RPC requests (@tomtau)
|
||||
- [txindex/kv] [\#2759](https://github.com/tendermint/tendermint/issues/2759) Fix tx.height range queries
|
||||
@@ -2117,9 +2286,6 @@ program](https://hackerone.com/tendermint).
|
||||
Special thanks to external contributors on this release:
|
||||
@danil-lashin, @kevlubkcm, @krhubert, @srmo
|
||||
|
||||
Friendly reminder, we have a [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
* Go API
|
||||
@@ -2163,8 +2329,6 @@ program](https://hackerone.com/tendermint).
|
||||
|
||||
Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### FEATURES:
|
||||
|
||||
- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)
|
||||
@@ -2182,8 +2346,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
|
||||
Special thanks to external contributors on this release: @katakonst
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### IMPROVEMENTS:
|
||||
|
||||
- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic
|
||||
@@ -2211,7 +2373,7 @@ Special thanks to external contributors on this release:
|
||||
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu.
|
||||
|
||||
Special thanks to @Slamper for a series of bug reports in our [bug bounty
|
||||
program](https://hackerone.com/tendermint) which are fixed in this release.
|
||||
program](https://hackerone.com/cosmos) which are fixed in this release.
|
||||
|
||||
This release is primarily about adding Version fields to various data structures,
|
||||
optimizing consensus messages for signing and verification in
|
||||
@@ -2241,7 +2403,7 @@ increasing attention to backwards compatibility. Thanks for bearing with us!
|
||||
* [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as
|
||||
encoded on disk.
|
||||
* [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default
|
||||
behaviour to `prove=false`
|
||||
behavior to `prove=false`
|
||||
* [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and
|
||||
`/net_info`
|
||||
* [rpc] [\#2636](https://github.com/tendermint/tendermint/issues/2636) Remove
|
||||
@@ -2355,9 +2517,7 @@ Special thanks to external contributors on this release:
|
||||
This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
|
||||
It also addresses some issues found via security audit, removes various unused
|
||||
functions from `libs/common`, and implements
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
[ADR-012](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-012-peer-transport.md).
|
||||
|
||||
BREAKING CHANGES:
|
||||
|
||||
@@ -2388,7 +2548,7 @@ FEATURES:
|
||||
- [libs] [\#2286](https://github.com/tendermint/tendermint/issues/2286) Panic if `autofile` or `db/fsdb` permissions change from 0600.
|
||||
|
||||
IMPROVEMENTS:
|
||||
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialised (@bradyjoestar)
|
||||
- [libs/db] [\#2371](https://github.com/tendermint/tendermint/issues/2371) Output error instead of panic when the given `db_backend` is not initialized (@bradyjoestar)
|
||||
- [mempool] [\#2399](https://github.com/tendermint/tendermint/issues/2399) Make mempool cache a proper LRU (@bradyjoestar)
|
||||
- [p2p] [\#2126](https://github.com/tendermint/tendermint/issues/2126) Introduce PeerTransport interface to improve isolation of concerns
|
||||
- [libs/common] [\#2326](https://github.com/tendermint/tendermint/issues/2326) Service returns ErrNotStarted
|
||||
@@ -2438,7 +2598,7 @@ BREAKING CHANGES:
|
||||
- [abci] Added address of the original proposer of the block to Header
|
||||
- [abci] Change ABCI Header to match Tendermint exactly
|
||||
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
|
||||
[ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
|
||||
[ADR-018](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-018-ABCI-Validators.md)):
|
||||
- Remove PubKey from `Validator` (so it's just Address and Power)
|
||||
- Introduce `ValidatorUpdate` (with just PubKey and Power)
|
||||
- InitChain and EndBlock use ValidatorUpdate
|
||||
@@ -2460,7 +2620,7 @@ BREAKING CHANGES:
|
||||
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
|
||||
- Add NextValidatorSet to State, changes on-disk representation of state
|
||||
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
|
||||
[ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
|
||||
[ADR-020](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-020-block-size.md)).
|
||||
- Remove ConsensusParams.BlockSize.MaxTxs
|
||||
- Introduce maximum sizes for all components of a block, including ChainID
|
||||
- [types] Updates to the block Header:
|
||||
@@ -2471,7 +2631,7 @@ BREAKING CHANGES:
|
||||
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
|
||||
- Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
|
||||
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
|
||||
[ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
|
||||
[ADR-014](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-014-secp-malleability.md)):
|
||||
- format changed from DER to `r || s`, both little endian encoded as 32 bytes.
|
||||
- malleability removed by requiring `s` to be in canonical form.
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Unreleased Changes
|
||||
|
||||
## vX.X
|
||||
## v0.37.0
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
@@ -9,53 +9,31 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
|
||||
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
|
||||
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
|
||||
|
||||
- [config] \#9259 Rename the fastsync section and the fast_sync key to blocksync and block_sync respectively
|
||||
|
||||
- Apps
|
||||
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
|
||||
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
|
||||
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||
|
||||
- [abci/counter] \#6684 Delete counter example app
|
||||
- [txResults] \#9175 Remove `gas_used` & `gas_wanted` from being merkleized in the `LastResultsHash` in the header
|
||||
- [abci] \#5783 Make length delimiter encoding consistent (`uint64`) between ABCI and P2P wire-level protocols
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
- Go API
|
||||
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
|
||||
- [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
|
||||
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
|
||||
- [proto/p2p] Renamed `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
|
||||
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
|
||||
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
|
||||
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
|
||||
|
||||
- [all] \#9144 Change spelling from British English to American (@cmwaters)
|
||||
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
- Data Storage
|
||||
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
|
||||
|
||||
### FEATURES
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
|
||||
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
|
||||
- [privval] \#5725 Add gRPC support to private validator.
|
||||
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
|
||||
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
|
||||
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
|
||||
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
|
||||
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
|
||||
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
|
||||
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)
|
||||
- [config] \#9054 Configuration flag added to permit Tendermint to discard all ABCIResponses except for the most recent
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
|
||||
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
|
||||
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
|
||||
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
|
||||
- [consensus] \#9229 fix round number of `enterPropose` when handling `RoundStepNewRound` timeout. (@fatcat22)
|
||||
- [docker] \#9073 enable cross platform build using docker buildx
|
||||
|
||||
@@ -1,59 +1,109 @@
|
||||
# The Tendermint Code of Conduct
|
||||
|
||||
This code of conduct applies to all projects run by the Tendermint/COSMOS team and hence to tendermint.
|
||||
|
||||
This code of conduct applies to all projects run by the Tendermint/COSMOS team
|
||||
and hence to Tendermint.
|
||||
|
||||
----
|
||||
|
||||
|
||||
# Conduct
|
||||
|
||||
## Contact: conduct@tendermint.com
|
||||
|
||||
* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
|
||||
* We are committed to providing a friendly, safe and welcoming environment for
|
||||
all, regardless of level of experience, gender, gender identity and
|
||||
expression, sexual orientation, disability, personal appearance, body size,
|
||||
race, ethnicity, age, religion, nationality, or other similar characteristic.
|
||||
|
||||
* On Slack, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
|
||||
* On Slack, please avoid using overtly sexual nicknames or other nicknames that
|
||||
might detract from a friendly, safe and welcoming environment for all.
|
||||
|
||||
* Please be kind and courteous. There’s no need to be mean or rude.
|
||||
|
||||
* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
|
||||
* Respect that people have differences of opinion and that every design or
|
||||
implementation choice carries a trade-off and numerous costs. There is seldom
|
||||
a right answer.
|
||||
|
||||
* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
|
||||
* Please keep unstructured critique to a minimum. If you have solid ideas you
|
||||
want to experiment with, make a fork and see how it works.
|
||||
|
||||
* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups.
|
||||
* We will exclude you from interaction if you insult, demean or harass anyone.
|
||||
That is not welcome behavior. We interpret the term “harassment” as including
|
||||
the definition in the [Citizen Code of Conduct][ccoc]; if you have any lack of
|
||||
clarity about what might be included in that concept, please read their
|
||||
definition. In particular, we don’t tolerate behavior that excludes people in
|
||||
socially marginalized groups.
|
||||
|
||||
* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back.
|
||||
|
||||
* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome.
|
||||
* Private harassment is also unacceptable. No matter who you are, if you feel
|
||||
you have been or are being harassed or made uncomfortable by a community
|
||||
member, please contact one of the channel admins or the person mentioned above
|
||||
immediately. Whether you’re a regular contributor or a newcomer, we care about
|
||||
making this community a safe place for you and we’ve got your back.
|
||||
|
||||
* Likewise any spamming, trolling, flaming, baiting or other attention-stealing
|
||||
behavior is not welcome.
|
||||
|
||||
----
|
||||
|
||||
|
||||
# Moderation
|
||||
|
||||
These are the policies for upholding our community’s standards of conduct. If you feel that a thread needs moderation, please contact the above mentioned person.
|
||||
These are the policies for upholding our community’s standards of conduct. If
|
||||
you feel that a thread needs moderation, please contact the above mentioned
|
||||
person.
|
||||
|
||||
1. Remarks that violate the Tendermint/COSMOS standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
|
||||
1. Remarks that violate the Tendermint/COSMOS standards of conduct, including
|
||||
hateful, hurtful, oppressive, or exclusionary remarks, are not allowed.
|
||||
(Cursing is allowed, but never targeting another user, and never in a hateful
|
||||
manner.)
|
||||
|
||||
2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
|
||||
2. Remarks that moderators find inappropriate, whether listed in the code of
|
||||
conduct or not, are also not allowed.
|
||||
|
||||
3. Moderators will first respond to such remarks with a warning.
|
||||
|
||||
4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of the communication channel to cool off.
|
||||
4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of
|
||||
the communication channel to cool off.
|
||||
|
||||
5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
|
||||
5. If the user comes back and continues to make trouble, they will be banned,
|
||||
i.e., indefinitely excluded.
|
||||
|
||||
6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
|
||||
6. Moderators may choose at their discretion to un-ban the user if it was a
|
||||
first offense and they offer the offended party a genuine apology.
|
||||
|
||||
7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, in private. Complaints about bans in-channel are not allowed.
|
||||
7. If a moderator bans someone and you think it was unjustified, please take it
|
||||
up with that moderator, or with a different moderator, in private. Complaints
|
||||
about bans in-channel are not allowed.
|
||||
|
||||
8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
|
||||
8. Moderators are held to a higher standard than other community members. If a
|
||||
moderator creates an inappropriate situation, they should expect less leeway
|
||||
than others.
|
||||
|
||||
In the Tendermint/COSMOS community we strive to go the extra step to look out for each other. Don’t just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they’re off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
|
||||
In the Tendermint/COSMOS community we strive to go the extra step to look out
|
||||
for each other. Don’t just aim to be technically unimpeachable, try to be your
|
||||
best self. In particular, avoid flirting with offensive or sensitive issues,
|
||||
particularly if they’re off-topic; this all too often leads to unnecessary
|
||||
fights, hurt feelings, and damaged trust; worse, it can drive people away
|
||||
from the community entirely.
|
||||
|
||||
And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could’ve communicated better — remember that it’s your responsibility to make your fellow Cosmonauts comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
|
||||
And if someone takes issue with something you said or did, resist the urge to be
|
||||
defensive. Just stop doing what it was they complained about and apologize. Even
|
||||
if you feel you were misinterpreted or unfairly accused, chances are good there
|
||||
was something you could’ve communicated better — remember that it’s your
|
||||
responsibility to make your fellow Cosmonauts comfortable. Everyone wants to
|
||||
get along and we are all here first and foremost because we want to talk
|
||||
about cool technology. You will find that people will be eager to assume
|
||||
good intent and forgive as long as you earn their trust.
|
||||
|
||||
The enforcement policies listed above apply to all official Tendermint/COSMOS venues.For other projects adopting the Tendermint/COSMOS Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion.
|
||||
The enforcement policies listed above apply to all official Tendermint/COSMOS
|
||||
venues. For other projects adopting the Tendermint/COSMOS Code of Conduct,
|
||||
please contact the maintainers of those projects for enforcement. If you wish to
|
||||
use this code of conduct for your own project, consider explicitly mentioning
|
||||
your moderation policy or making a copy with your own moderation policy so as to
|
||||
avoid confusion.
|
||||
|
||||
*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling), the [Contributor Covenant v1.3.0](http://contributor-covenant.org/version/1/3/0/) and the [Rust Code of Conduct](https://www.rust-lang.org/en-US/conduct.html).
|
||||
\*Adapted from the [Node.js Policy on Trolling][node-trolling-policy], the
|
||||
[Contributor Covenant v1.3.0][ccov] and the [Rust Code of Conduct][rust-coc].
|
||||
|
||||
[ccoc]: https://github.com/stumpsyn/policies/blob/master/citizen_code_of_conduct.md
|
||||
[node-trolling-policy]: http://blog.izs.me/post/30036893703/policy-on-trolling
|
||||
[ccov]: http://contributor-covenant.org/version/1/3/0/
|
||||
[rust-coc]: https://www.rust-lang.org/en-US/conduct.html
|
||||
|
||||
249
CONTRIBUTING.md
249
CONTRIBUTING.md
@@ -26,7 +26,8 @@ will indicate their support with a heartfelt emoji.
|
||||
|
||||
If the issue would benefit from thorough discussion, maintainers may
|
||||
request that you create a [Request For
|
||||
Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
|
||||
Comment](https://github.com/tendermint/spec/tree/master/rfc)
|
||||
in the Tendermint spec repo. Discussion
|
||||
at the RFC stage will build collective understanding of the dimensions
|
||||
of the problems and help structure conversations around trade-offs.
|
||||
|
||||
@@ -104,41 +105,46 @@ specify exactly the dependency you want to update, eg.
|
||||
|
||||
## Protobuf
|
||||
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along
|
||||
with [`gogoproto`](https://github.com/gogo/protobuf) to generate code for use
|
||||
across Tendermint Core.
|
||||
|
||||
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
|
||||
To generate proto stubs, lint, and check protos for breaking changes, you will
|
||||
need to install [buf](https://buf.build/) and `gogoproto`. Then, from the root
|
||||
of the repository, run:
|
||||
|
||||
There are two ways to generate your proto stubs.
|
||||
```bash
|
||||
# Lint all of the .proto files in proto/tendermint
|
||||
make proto-lint
|
||||
|
||||
1. Use Docker, pull an image that will generate your proto stubs with no need to install anything. `make proto-gen-docker`
|
||||
2. Run `make proto-gen` after installing `buf` and `gogoproto`, you can do this by running `make protobuf`.
|
||||
# Check if any of your local changes (prior to committing to the Git repository)
|
||||
# are breaking
|
||||
make proto-check-breaking
|
||||
|
||||
### Installation Instructions
|
||||
|
||||
To install `protoc`, download an appropriate release (<https://github.com/protocolbuffers/protobuf>) and then move the provided binaries into your PATH (follow instructions in README included with the download).
|
||||
|
||||
To install `gogoproto`, do the following:
|
||||
|
||||
```sh
|
||||
go get github.com/gogo/protobuf/gogoproto
|
||||
cd $GOPATH/pkg/mod/github.com/gogo/protobuf@v1.3.1 # or wherever go get installs things
|
||||
make install
|
||||
# Generate Go code from the .proto files in proto/tendermint
|
||||
make proto-gen
|
||||
```
|
||||
|
||||
You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files.
|
||||
To automatically format `.proto` files, you will need
|
||||
[`clang-format`](https://clang.llvm.org/docs/ClangFormat.html) installed. Once
|
||||
installed, you can run:
|
||||
|
||||
```bash
|
||||
make proto-format
|
||||
```
|
||||
|
||||
### Visual Studio Code
|
||||
|
||||
If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:
|
||||
|
||||
|
||||
```json
{
  "protoc": {
    "options": [
      "--proto_path=${workspaceRoot}/proto",
      "--proto_path=${workspaceRoot}/third_party/proto"
    ]
  }
}
```
|
||||
|
||||
@@ -243,125 +249,86 @@ Fixes #nnnn
|
||||
|
||||
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
|
||||
|
||||
### Release Procedure
|
||||
|
||||
#### Major Release
|
||||
|
||||
This major release process assumes that this release was preceded by release candidates.
|
||||
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
|
||||
|
||||
1. Start on the latest RC branch (`RCx/vX.X.0`).
|
||||
2. Run integration tests.
|
||||
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
|
||||
- "Squash" changes from the changelog entries for the RCs into a single entry,
|
||||
and add all changes included in `CHANGELOG_PENDING.md`.
|
||||
(Squashing includes both combining all entries, as well as removing or simplifying
|
||||
any intra-RC changes. It may also help to alphabetize the entries by package name.)
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
|
||||
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
|
||||
- `git checkout RCx/vX.X.0`
|
||||
- `git checkout -b release/vX.X.0`
|
||||
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
|
||||
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
|
||||
- `git push origin vX.X.0`
|
||||
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
||||
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
|
||||
new major release series.
|
||||
|
||||
##### Major Release (from `master`)
|
||||
|
||||
1. Start on `master`
|
||||
2. Run integration tests (see `test_integrations` in Makefile)
|
||||
3. Prepare release in a pull request against `master` (to be squash merged):
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
|
||||
had release candidates, squash all the RC updates into one
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
|
||||
release, and add the github aliases of external contributors to the top of
|
||||
the changelog. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Make sure all significant breaking changes are covered in `UPGRADING.md`
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
5. Update the `CHANGELOG.md` file on master with the release's changelog.
|
||||
6. Delete any RC branches and tags for this release (if applicable)
|
||||
|
||||
#### Minor Release (Point Releases)
|
||||
|
||||
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
|
||||
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
|
||||
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
|
||||
|
||||
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
|
||||
|
||||
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
|
||||
|
||||
To create a minor release:
|
||||
|
||||
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
|
||||
2. Run integration tests: `make test_integrations`
|
||||
3. Check out a new branch and prepare the release:
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump the ABCI version number, if necessary.
|
||||
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
||||
which can change during minor releases, and only field additions are valid minor changes.)
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes that will land them back on `vX.X.x`
|
||||
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
|
||||
- Remove all `R:minor` labels from the pull requests that were included in the release.
|
||||
- Do not merge the backport branch into master.
|
||||
|
||||
#### Release Candidates
|
||||
|
||||
Before creating an official release, especially a major release, we may want to create a
|
||||
release candidate (RC) for our friends and partners to test out. We use git tags to
|
||||
create RCs, and we build them off of RC branches. RC branches typically have names formatted
|
||||
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
|
||||
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
|
||||
|
||||
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
|
||||
have distinct names from the tags/release names.)
|
||||
|
||||
1. Start from the RC branch (e.g. `RC0/v0.34.0`).
|
||||
2. Create the new tag, specifying a name and a tag "message":
|
||||
`git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
|
||||
3. Push the tag back up to origin:
|
||||
`git push origin v0.34.0-rc0`
|
||||
Now the tag should be available on the repo's releases page.
|
||||
4. Create a new release candidate branch for any possible updates to the RC:
|
||||
`git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
|
||||
|
||||
## Testing
|
||||
|
||||
All repos should be hooked up to [CircleCI](https://circleci.com/).
|
||||
### Unit tests
|
||||
|
||||
If they have `.go` files in the root directory, they will be automatically
|
||||
tested by circle using `go test -v -race ./...`. If not, they will need a
|
||||
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
|
||||
includes its continuous integration status using a badge in the `README.md`.
|
||||
Unit tests are located in `_test.go` files as directed by [the Go testing
|
||||
package](https://golang.org/pkg/testing/). If you're adding or removing a
|
||||
function, please check there's a `TestType_Method` test for it.
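
For illustration, a minimal test following that naming convention might look like the sketch below. The `Counter` type and its `Add` method are placeholders rather than actual Tendermint code, and in practice the type would live in a regular (non-test) file of the package.

```go
package counter

import "testing"

// Counter is a stand-in type used only to illustrate the convention.
type Counter struct{ n int }

// Add increments the counter by x and returns the new value.
func (c *Counter) Add(x int) int { c.n += x; return c.n }

// TestCounter_Add follows the TestType_Method convention: the name pairs
// the type (Counter) with the method under test (Add).
func TestCounter_Add(t *testing.T) {
	c := &Counter{}
	if got := c.Add(3); got != 3 {
		t.Fatalf("Add(3) = %d, want 3", got)
	}
}
```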
|
||||
|
||||
Run: `make test`
|
||||
|
||||
### Integration tests
|
||||
|
||||
Integration tests are also located in `_test.go` files. What differentiates
|
||||
them is a more complicated setup, which usually involves setting up two or more
|
||||
components.
|
||||
|
||||
Run: `make test_integrations`
|
||||
|
||||
### End-to-end tests
|
||||
|
||||
End-to-end tests are used to verify a fully integrated Tendermint network.
|
||||
|
||||
See [README](./test/e2e/README.md) for details.
|
||||
|
||||
Run:
|
||||
|
||||
```sh
|
||||
cd test/e2e && \
|
||||
make && \
|
||||
./build/runner -f networks/ci.toml
|
||||
```
|
||||
|
||||
### Model-based tests (ADVANCED)
|
||||
|
||||
*NOTE: if you're just submitting your first PR, you won't need to touch these
|
||||
most probably (99.9%)*.
|
||||
|
||||
For components, that have been [formally
|
||||
verified](https://en.wikipedia.org/wiki/Formal_verification) using
|
||||
[TLA+](https://en.wikipedia.org/wiki/TLA%2B), it may be possible to generate
|
||||
tests using a combination of the [Apalache Model
|
||||
Checker](https://apalache.informal.systems/) and [tendermint-rs testgen
|
||||
util](https://github.com/informalsystems/tendermint-rs/tree/master/testgen).
|
||||
|
||||
Now, I know there's a lot to take in. If you want to learn more, check out [
|
||||
this video](https://www.youtube.com/watch?v=aveoIMphzW8) by Andrey Kupriyanov
|
||||
& Igor Konnov.
|
||||
|
||||
At the moment, we have model-based tests for the light client, located in the
|
||||
`./light/mbt` directory.
|
||||
|
||||
Run: `cd light/mbt && go test`
|
||||
|
||||
### Fuzz tests (ADVANCED)
|
||||
|
||||
*NOTE: if you're just submitting your first PR, you won't need to touch these
|
||||
most probably (99.9%)*.
|
||||
|
||||
[Fuzz tests](https://en.wikipedia.org/wiki/Fuzzing) can be found inside the
|
||||
`./test/fuzz` directory. See [README.md](./test/fuzz/README.md) for details.
|
||||
|
||||
Run: `cd test/fuzz && make fuzz-{PACKAGE-COMPONENT}`
|
||||
|
||||
### Jepsen tests (ADVANCED)
|
||||
|
||||
*NOTE: if you're just submitting your first PR, you won't need to touch these
|
||||
most probably (99.9%)*.
|
||||
|
||||
[Jepsen](http://jepsen.io/) tests are used to verify the
|
||||
[linearizability](https://jepsen.io/consistency/models/linearizable) property
|
||||
of the Tendermint consensus. They are located in a separate repository
|
||||
-> <https://github.com/tendermint/jepsen>. Please refer to its README for more
|
||||
information.
|
||||
|
||||
### RPC Testing
|
||||
|
||||
If you contribute to the RPC endpoints it's important to document your changes in the [Openapi file](./rpc/openapi/openapi.yaml)
|
||||
To test your changes you should install `nodejs` and run:
|
||||
**If you contribute to the RPC endpoints it's important to document your
|
||||
changes in the [Openapi file](./rpc/openapi/openapi.yaml)**.
|
||||
|
||||
To test your changes you must install `nodejs` and run:
|
||||
|
||||
```bash
|
||||
npm i -g dredd
|
||||
@@ -369,4 +336,8 @@ make build-linux build-contract-tests-hooks
|
||||
make contract-tests
|
||||
```
|
||||
|
||||
This command will popup a network and check every endpoint against what has been documented
|
||||
**WARNING: these are currently broken due to <https://github.com/apiaryio/dredd>
|
||||
not supporting complete OpenAPI 3**.
|
||||
|
||||
This command will spin up a network and check every endpoint against what has
|
||||
been documented.
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
# stage 1 Generate Tendermint Binary
|
||||
FROM golang:1.15-alpine as builder
|
||||
FROM --platform=$BUILDPLATFORM golang:1.18-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make
|
||||
COPY / /tendermint
|
||||
WORKDIR /tendermint
|
||||
RUN make build-linux
|
||||
RUN TARGETPLATFORM=$TARGETPLATFORM make build-linux
|
||||
|
||||
# stage 2
|
||||
FROM golang:1.15-alpine
|
||||
@@ -49,7 +49,7 @@ ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain
|
||||
COPY ./DOCKER/docker-entrypoint.sh /usr/local/bin/
|
||||
|
||||
ENTRYPOINT ["docker-entrypoint.sh"]
|
||||
CMD ["start"]
|
||||
CMD ["node"]
|
||||
|
||||
# Expose the data directory as a volume since there's mutable state in there
|
||||
VOLUME [ "$TMHOME" ]
|
||||
|
||||
@@ -32,7 +32,7 @@ A quick example of a built-in app and Tendermint core in one container.
|
||||
|
||||
```sh
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy-app=kvstore
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
|
||||
```
|
||||
|
||||
## Local cluster
|
||||
|
||||
@@ -6,11 +6,11 @@ if [ ! -d "$TMHOME/config" ]; then
|
||||
tendermint init
|
||||
|
||||
sed -i \
|
||||
-e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \
|
||||
-e "s/^proxy_app\s*=.*/proxy_app = \"$PROXY_APP\"/" \
|
||||
-e "s/^moniker\s*=.*/moniker = \"$MONIKER\"/" \
|
||||
-e 's/^addr-book-strict\s*=.*/addr-book-strict = false/' \
|
||||
-e 's/^timeout-commit\s*=.*/timeout-commit = "500ms"/' \
|
||||
-e 's/^index-all-tags\s*=.*/index-all-tags = true/' \
|
||||
-e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \
|
||||
-e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \
|
||||
-e 's/^index_all_tags\s*=.*/index_all_tags = true/' \
|
||||
-e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \
|
||||
-e 's/^prometheus\s*=.*/prometheus = true/' \
|
||||
"$TMHOME/config/config.toml"
|
||||
|
||||
230
Makefile
230
Makefile
@@ -1,7 +1,6 @@
|
||||
#!/usr/bin/make -f
|
||||
|
||||
PACKAGES=$(shell go list ./...)
|
||||
BUILDDIR ?= $(CURDIR)/build
|
||||
BUILDDIR?=$(CURDIR)/build
|
||||
OUTPUT?=$(BUILDDIR)/tendermint
|
||||
|
||||
BUILD_TAGS?=tendermint
|
||||
|
||||
@@ -15,7 +14,6 @@ endif
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
|
||||
CGO_ENABLED ?= 0
|
||||
|
||||
# handle nostrip
|
||||
@@ -55,65 +53,151 @@ endif
|
||||
# allow users to pass additional flags via the conventional LDFLAGS variable
|
||||
LD_FLAGS += $(LDFLAGS)
|
||||
|
||||
# Process the Docker environment variable TARGETPLATFORM
# in order to build the binary with the corresponding ARCH.
# By default it will always build for linux/amd64.
|
||||
TARGETPLATFORM ?=
|
||||
GOOS ?= linux
|
||||
GOARCH ?= amd64
|
||||
GOARM ?=
|
||||
|
||||
ifeq (linux/arm,$(findstring linux/arm,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=arm
|
||||
GOARM=7
|
||||
endif
|
||||
|
||||
ifeq (linux/arm/v6,$(findstring linux/arm/v6,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=arm
|
||||
GOARM=6
|
||||
endif
|
||||
|
||||
ifeq (linux/arm64,$(findstring linux/arm64,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=arm64
|
||||
GOARM=7
|
||||
endif
|
||||
|
||||
ifeq (linux/386,$(findstring linux/386,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=386
|
||||
endif
|
||||
|
||||
ifeq (linux/amd64,$(findstring linux/amd64,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=amd64
|
||||
endif
|
||||
|
||||
ifeq (linux/mips,$(findstring linux/mips,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=mips
|
||||
endif
|
||||
|
||||
ifeq (linux/mipsle,$(findstring linux/mipsle,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=mipsle
|
||||
endif
|
||||
|
||||
ifeq (linux/mips64,$(findstring linux/mips64,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=mips64
|
||||
endif
|
||||
|
||||
ifeq (linux/mips64le,$(findstring linux/mips64le,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=mips64le
|
||||
endif
|
||||
|
||||
ifeq (linux/riscv64,$(findstring linux/riscv64,$(TARGETPLATFORM)))
|
||||
GOOS=linux
|
||||
GOARCH=riscv64
|
||||
endif
|
||||
|
||||
all: check build test install
|
||||
.PHONY: all
|
||||
|
||||
# The below include contains the tools.
|
||||
include tools/Makefile
|
||||
include test/Makefile
|
||||
include tests.mk
|
||||
|
||||
###############################################################################
|
||||
### Build Tendermint ###
|
||||
### Build Tendermint ###
|
||||
###############################################################################
|
||||
|
||||
build: $(BUILDDIR)/
|
||||
CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(BUILDDIR)/ ./cmd/tendermint/
|
||||
build:
|
||||
CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT) ./cmd/tendermint/
|
||||
.PHONY: build
|
||||
|
||||
install:
|
||||
CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
|
||||
.PHONY: install
|
||||
|
||||
$(BUILDDIR)/:
|
||||
mkdir -p $@
|
||||
###############################################################################
|
||||
### Metrics ###
|
||||
###############################################################################
|
||||
|
||||
metrics: testdata-metrics
|
||||
go generate -run="scripts/metricsgen" ./...
|
||||
.PHONY: metrics
|
||||
|
||||
# By convention, the go tool ignores subdirectories of directories named
|
||||
# 'testdata'. This command invokes the generate command on the folder directly
|
||||
# to avoid this.
|
||||
testdata-metrics:
|
||||
ls ./scripts/metricsgen/testdata | xargs -I{} go generate -v -run="scripts/metricsgen" ./scripts/metricsgen/testdata/{}
|
||||
.PHONY: testdata-metrics
|
||||
|
||||
###############################################################################
|
||||
### Mocks ###
|
||||
###############################################################################
|
||||
|
||||
mockery:
|
||||
go generate -run="./scripts/mockery_generate.sh" ./...
|
||||
.PHONY: mockery
|
||||
|
||||
###############################################################################
|
||||
### Protobuf ###
|
||||
###############################################################################
|
||||
|
||||
proto-all: proto-gen proto-lint proto-check-breaking
|
||||
.PHONY: proto-all
|
||||
check-proto-deps:
|
||||
ifeq (,$(shell which protoc-gen-gogofaster))
|
||||
@go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
|
||||
endif
|
||||
.PHONY: check-proto-deps
|
||||
|
||||
proto-gen:
|
||||
## If you get the following error,
|
||||
## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
|
||||
## See https://stackoverflow.com/a/25518702
|
||||
## Note the $< here is substituted for the %.proto
|
||||
## Note the $@ here is substituted for the %.pb.go
|
||||
@sh scripts/protocgen.sh
|
||||
check-proto-format-deps:
|
||||
ifeq (,$(shell which clang-format))
|
||||
$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
|
||||
endif
|
||||
.PHONY: check-proto-format-deps
|
||||
|
||||
proto-gen: check-proto-deps
|
||||
@echo "Generating Protobuf files"
|
||||
@go run github.com/bufbuild/buf/cmd/buf generate
|
||||
@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
|
||||
.PHONY: proto-gen
|
||||
|
||||
proto-gen-docker:
|
||||
@docker pull -q tendermintdev/docker-build-proto
|
||||
@echo "Generating Protobuf files"
|
||||
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
|
||||
.PHONY: proto-gen-docker
|
||||
|
||||
proto-lint:
|
||||
@$(DOCKER_BUF) check lint --error-format=json
|
||||
# These targets are provided for convenience and are intended for local
|
||||
# execution only.
|
||||
proto-lint: check-proto-deps
|
||||
@echo "Linting Protobuf files"
|
||||
@go run github.com/bufbuild/buf/cmd/buf lint
|
||||
.PHONY: proto-lint
|
||||
|
||||
proto-format:
|
||||
proto-format: check-proto-format-deps
|
||||
@echo "Formatting Protobuf files"
|
||||
docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
|
||||
@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
|
||||
.PHONY: proto-format
|
||||
|
||||
proto-check-breaking:
|
||||
@$(DOCKER_BUF) check breaking --against-input .git#branch=master
|
||||
proto-check-breaking: check-proto-deps
|
||||
@echo "Checking for breaking changes in Protobuf files against local branch"
|
||||
@echo "Note: This is only useful if your changes have not yet been committed."
|
||||
@echo " Otherwise read up on buf's \"breaking\" command usage:"
|
||||
@echo " https://docs.buf.build/breaking/usage"
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
|
||||
.PHONY: proto-check-breaking
|
||||
|
||||
proto-check-breaking-ci:
|
||||
@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
|
||||
@go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x
|
||||
.PHONY: proto-check-breaking-ci
|
||||
|
||||
###############################################################################
|
||||
@@ -128,27 +212,6 @@ install_abci:
|
||||
@go install -mod=readonly ./abci/cmd/...
|
||||
.PHONY: install_abci
|
||||
|
||||
###############################################################################
|
||||
### Privval Server ###
|
||||
###############################################################################
|
||||
|
||||
build_privval_server:
|
||||
@go build -mod=readonly -o $(BUILDDIR)/ -i ./cmd/priv_val_server/...
|
||||
.PHONY: build_privval_server
|
||||
|
||||
generate_test_cert:
|
||||
# generate self-signed certificate authority
@certstrap init --common-name "root CA" --expires "20 years"
# generate server certificate
@certstrap request-cert -cn server -ip 127.0.0.1
# self-sign server certificate with rootCA
@certstrap sign server --CA "root CA"
# generate client certificate
@certstrap request-cert -cn client -ip 127.0.0.1
# self-sign client certificate with rootCA
@certstrap sign client --CA "root CA"
|
||||
.PHONY: generate_test_cert
|
||||
|
||||
###############################################################################
|
||||
### Distribution ###
|
||||
###############################################################################
|
||||
@@ -177,7 +240,7 @@ draw_deps:
|
||||
|
||||
get_deps_bin_size:
|
||||
@# Copy of build recipe with additional flags to perform binary size analysis
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(BUILDDIR)/ ./cmd/tendermint/ 2>&1))
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
|
||||
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
|
||||
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
|
||||
.PHONY: get_deps_bin_size
|
||||
@@ -221,23 +284,36 @@ DESTINATION = ./index.html.md
|
||||
###############################################################################
|
||||
### Documentation ###
|
||||
###############################################################################
|
||||
# todo remove once tendermint.com DNS is solved
|
||||
|
||||
build-docs:
|
||||
@cd docs && \
|
||||
while read -r branch path_prefix; do \
|
||||
(git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
|
||||
(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
|
||||
mkdir -p ~/output/$${path_prefix} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
|
||||
cp ~/output/$${path_prefix}/index.html ~/output ; \
|
||||
done < versions ;
|
||||
.PHONY: build-docs
|
||||
|
||||
sync-docs:
|
||||
cd ~/output && \
|
||||
echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \
|
||||
echo "CI job = ${CIRCLE_BUILD_URL}" >> version.html ; \
|
||||
aws s3 sync . s3://${WEBSITE_BUCKET} --profile terraform --delete ; \
|
||||
aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ;
|
||||
.PHONY: sync-docs
|
||||
|
||||
# Verify that important design docs have ToC entries.
|
||||
check-docs-toc:
|
||||
@./docs/presubmit.sh
|
||||
.PHONY: check-docs-toc
|
||||
|
||||
###############################################################################
|
||||
### Docker image ###
|
||||
###############################################################################
|
||||
|
||||
build-docker: build-linux
|
||||
cp $(BUILDDIR)/tendermint DOCKER/tendermint
|
||||
cp $(OUTPUT) DOCKER/tendermint
|
||||
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
|
||||
rm -rf DOCKER/tendermint
|
||||
.PHONY: build-docker
|
||||
@@ -247,8 +323,8 @@ build-docker: build-linux
|
||||
###############################################################################
|
||||
|
||||
# Build linux binary on other platforms
|
||||
build-linux: tools
|
||||
GOOS=linux GOARCH=amd64 $(MAKE) build
|
||||
build-linux:
|
||||
GOOS=$(GOOS) GOARCH=$(GOARCH) GOARM=$(GOARM) $(MAKE) build
|
||||
.PHONY: build-linux
|
||||
|
||||
build-docker-localnode:
|
||||
@@ -292,16 +368,24 @@ contract-tests:
|
||||
dredd
|
||||
.PHONY: contract-tests
|
||||
|
||||
clean:
|
||||
rm -rf $(CURDIR)/artifacts/ $(BUILDDIR)/
|
||||
# Implements test splitting and running. This is pulled directly from
|
||||
# the github action workflows for better local reproducibility.
|
||||
|
||||
build-reproducible:
|
||||
docker rm latest-build || true
|
||||
docker run --volume=$(CURDIR):/sources:ro \
|
||||
--env TARGET_PLATFORMS='linux/amd64 linux/arm64 darwin/amd64 windows/amd64' \
|
||||
--env APP=tendermint \
|
||||
--env COMMIT=$(shell git rev-parse --short=8 HEAD) \
|
||||
--env VERSION=$(shell git describe --tags) \
|
||||
--name latest-build cosmossdk/rbuilder:latest
|
||||
docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
|
||||
.PHONY: build-reproducible
|
||||
GO_TEST_FILES != find $(CURDIR) -name "*_test.go"
|
||||
|
||||
# default to four test splits
|
||||
NUM_SPLIT ?= 4
|
||||
|
||||
$(BUILDDIR):
|
||||
mkdir -p $@
|
||||
|
||||
# The format statement filters out all packages that don't have tests.
|
||||
# Note we need to check for both in-package tests (.TestGoFiles) and
|
||||
# out-of-package tests (.XTestGoFiles).
|
||||
$(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
|
||||
go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
|
||||
|
||||
split-test-packages:$(BUILDDIR)/packages.txt
|
||||
split -d -n l/$(NUM_SPLIT) $< $<.
|
||||
test-group-%:split-test-packages
|
||||
cat $(BUILDDIR)/packages.txt.$* | xargs go test -mod=readonly -timeout=5m -race -coverprofile=$(BUILDDIR)/$*.profile.out
|
||||
|
||||
158
PHILOSOPHY.md
Normal file
158
PHILOSOPHY.md
Normal file
@@ -0,0 +1,158 @@
|
||||
# Design goals
|
||||
|
||||
The design goals for Tendermint (and the SDK and related libraries) are:
|
||||
|
||||
* Simplicity and Legibility
|
||||
* Parallel performance, namely the ability to utilize multicore architectures
|
||||
* Ability to evolve the codebase bug-free
|
||||
* Debuggability
|
||||
* Complete correctness that considers all edge cases, especially in concurrency
|
||||
* Future-proof modular architecture, message protocol, APIs, and encapsulation
|
||||
|
||||
|
||||
## Justification
|
||||
|
||||
Legibility is key to maintaining bug-free software as it evolves toward more
|
||||
optimizations, more ease of debugging, and additional features.
|
||||
|
||||
It is too easy to introduce bugs over time by replacing lines of code with
|
||||
those that may panic, which is why locks should ideally be released by defer statements.
|
||||
|
||||
For example,
|
||||
|
||||
```go
func (obj *MyObj) something() {
	mtx.Lock()
	obj.something = other
	mtx.Unlock() // if the assignment above panics, this is never reached and the lock stays held
}
```
|
||||
|
||||
It is too easy to refactor the codebase in the future to replace `other` with
|
||||
`other.String()` for example, and this may introduce a bug that causes a
|
||||
deadlock. So, as much as reasonably possible, we should use defer statements, even though they introduce additional overhead.
|
||||
|
||||
If it is necessary to optimize the unlocking of mutex locks, the solution is
|
||||
more modularity via smaller functions, so that defer'd unlocks are scoped
|
||||
within a smaller function.
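
As a minimal sketch of the preferred pattern (the `MyObj` type and its field and method names here are illustrative, not taken from the Tendermint codebase), the deferred unlock guarantees the mutex is released even if the body panics:

```go
package main

import "sync"

// MyObj mirrors the example above; the names are illustrative only.
type MyObj struct {
	mtx       sync.Mutex
	something string
}

// setSomething releases the lock via defer, so the mutex is unlocked
// even if a later edit makes the body panic.
func (obj *MyObj) setSomething(v string) {
	obj.mtx.Lock()
	defer obj.mtx.Unlock()
	obj.something = v
}

func main() {
	obj := &MyObj{}
	obj.setSomething("other")
}
```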
|
||||
|
||||
Similarly, idiomatic for-loops should always be preferred over those that use
|
||||
custom counters, because it is too easy to evolve the body of a for-loop to
|
||||
become more complicated over time, and it becomes more and more difficult to
|
||||
assess the correctness of such a for-loop by visual inspection.
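
As a small, generic illustration of that point (not Tendermint code), compare the range-based loop with a hand-rolled counter:

```go
package main

import "fmt"

func main() {
	vals := []int{3, 1, 4}

	// Idiomatic: the range clause owns the iteration, so the body cannot
	// accidentally skip or repeat elements as it grows.
	sum := 0
	for _, v := range vals {
		sum += v
	}

	// Fragile: a hand-rolled counter is easy to mutate inside a growing
	// body, which is exactly the kind of drift the text warns about.
	sum2 := 0
	for i := 0; i < len(vals); i++ {
		sum2 += vals[i]
	}

	fmt.Println(sum, sum2) // 8 8
}
```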
|
||||
|
||||
|
||||
## On performance
|
||||
|
||||
It doesn't matter whether there are alternative implementations that are 2x or
|
||||
3x more performant, when the software doesn't work, deadlocks, or if bugs
|
||||
cannot be debugged. By taking advantage of multicore concurrency, the
|
||||
Tendermint implementation will be at least within an order of magnitude of what is theoretically possible. The design philosophy of Tendermint, and the choice of Go as the implementation language, are intended to make the Tendermint implementation the standard specification for concurrent BFT software.
|
||||
|
||||
By focusing on the message protocols (e.g. ABCI, p2p messages) and on encapsulation (e.g. the IAVL module and (relatively) independent reactors), we are both implementing a standard implementation to be used as the specification for future implementations in more optimizable languages like Rust, Java, and C++, and creating sufficiently performant software. Tendermint Core will
|
||||
never be as fast as future implementations of the Tendermint Spec, because Go
|
||||
isn't designed to be as fast as possible. The advantage of using Go is that we
|
||||
can develop the whole stack of modular components **faster** than in other
|
||||
languages.
|
||||
|
||||
Furthermore, the real bottleneck is in the application layer, and it isn't
|
||||
necessary to support more than a sufficiently decentralized set of validators
|
||||
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).
|
||||
|
||||
Instead of optimizing Tendermint performance down to the metal, let's focus on optimizing other matters, namely the ability to push feature-complete software
|
||||
that works well enough, can be debugged and maintained, and can serve as a spec
|
||||
for future implementations.
|
||||
|
||||
|
||||
## On encapsulation
|
||||
|
||||
In order to create maintainable, forward-optimizable software, it is critical
|
||||
to develop well-encapsulated objects that have well understood properties, and
|
||||
to re-use these easy-to-use-correctly components as building blocks for further
|
||||
encapsulated meta-objects.
|
||||
|
||||
For example, mutexes are cheap enough for Tendermint's design goals when there
|
||||
isn't goroutine contention, so it is encouraged to create concurrency safe
|
||||
structures with struct-level mutexes. If they are used in the context of
|
||||
non-concurrent logic, then the performance is good enough. If they are used in
|
||||
the context of concurrent logic, then it will still perform correctly.
|
||||
|
||||
Examples of this design principle can be seen in the types.ValidatorSet struct,
|
||||
and the rand.Rand struct. It's one single struct declaration that can be used
|
||||
in both concurrent and non-concurrent logic, and because it is well encapsulated, it's easy to get the usage of the mutex right.
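
A generic sketch of that pattern is shown below; the `Registry` type is hypothetical and only illustrates the idea of a struct-level mutex hidden behind methods, so the same declaration is safe in both concurrent and non-concurrent use:

```go
package main

import (
	"fmt"
	"sync"
)

// Registry is safe for concurrent use because every exported method takes
// the struct-level mutex; single-goroutine callers pay only the cheap,
// uncontended locking cost.
type Registry struct {
	mtx   sync.Mutex
	items map[string]int
}

func NewRegistry() *Registry {
	return &Registry{items: make(map[string]int)}
}

func (r *Registry) Set(k string, v int) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	r.items[k] = v
}

func (r *Registry) Get(k string) (int, bool) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	v, ok := r.items[k]
	return v, ok
}

func main() {
	r := NewRegistry()
	r.Set("power", 10)
	fmt.Println(r.Get("power"))
}
```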
|
||||
|
||||
### example: rand.Rand
|
||||
|
||||
`The default Source is safe for concurrent use by multiple goroutines, but
|
||||
Sources created by NewSource are not`. The default package-level source is safe for concurrent use because it is protected (see
|
||||
`lockedSource` in <https://golang.org/src/math/rand/rand.go>).
|
||||
|
||||
But we shouldn't rely on the global source; we should create our own Rand/Source instances and use them, especially for determinism in testing.
|
||||
So it is reasonable to have rand.Rand be protected by a mutex. Whether we want
|
||||
our own implementation of Rand is another question, but the answer there is
|
||||
also in the affirmative. Sometimes you want to know where Rand is being used
|
||||
in your code, so it becomes a simple matter of dropping in a log statement to
|
||||
inject inspectability into Rand usage. Also, it is nice to be able to extend
|
||||
the functionality of Rand with custom methods. For these reasons, and for the
|
||||
reasons outlined in this design philosophy document, we should
|
||||
continue to use the rand.Rand object, with mutex protection.
|
||||
|
||||
Another key aspect of good encapsulation is the choice of exposed vs unexposed
|
||||
methods. It should be clear to the reader of the code which methods are intended to be used in what context, and what safe usage is. Part of this is solved by making methods unexported. Another part of this is naming conventions on the methods (e.g. underscores) with good documentation,
|
||||
and code organization. If there are too many exposed methods and it isn't
|
||||
clear what methods have what side effects, then there is something wrong about
|
||||
the design of abstractions that should be revisited.
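
A tiny, hypothetical sketch of that idea: the exported method is the documented, concurrency-safe entry point, while the unexported helper encodes the "lock already held" assumption and is invisible outside the package:

```go
package main

import (
	"fmt"
	"sync"
)

type Store struct {
	mtx  sync.Mutex
	data map[string]string
}

// Put is the exported, concurrency-safe entry point.
func (s *Store) Put(k, v string) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.put(k, v)
}

// put assumes s.mtx is already held; keeping it unexported makes that
// contract invisible (and unusable) outside the package.
func (s *Store) put(k, v string) {
	if s.data == nil {
		s.data = make(map[string]string)
	}
	s.data[k] = v
}

func main() {
	s := &Store{}
	s.Put("k", "v")
	fmt.Println(s.data["k"])
}
```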
|
||||
|
||||
|
||||
## On concurrency

In order for Tendermint to remain relevant in the years to come, it is vital
for Tendermint to take advantage of multicore architectures. Due to the nature
of the problem, namely consensus across a concurrent p2p gossip network, and
the need to handle RPC requests from a large number of consuming subscribers,
it is unavoidable for Tendermint development to require expertise in
concurrency design, especially when it comes to the reactor design and RPC
request handling.

# Guidelines

Here are some guidelines for designing for (sufficient) performance and concurrency:

* Mutex locks are cheap enough when there isn't contention.
* Do not optimize code without analytical or observed proof that it is in a hot path.
* Don't over-use channels when mutex locks w/ encapsulation are sufficient.
* The need to drain channels is often a hint of unconsidered edge cases.
* The creation of O(N) one-off goroutines is generally technical debt that
  needs to be addressed sooner rather than later. Avoid creating too many
  goroutines as a patch around incomplete concurrency design, or at least be
  aware of the debt and do not invest in the debt. On the other hand, Tendermint
  is designed to have a limited number of peers (e.g. 10 or 20), so the creation
  of O(C) goroutines per O(P) peers is still O(C\*P=constant).
* Use defer statements to unlock as much as possible. If you want to unlock
  sooner, try to create more modular functions that do make use of defer
  statements, as in the sketch below.
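
For example (a sketch with hypothetical names), factoring the critical section
into its own small method lets defer do the unlocking, while the slow work
happens outside the lock:

```go
package state

import "sync"

// State is a hypothetical struct used only to illustrate the guideline.
type State struct {
	mtx    sync.Mutex
	height int64
}

// updateHeight holds the lock for exactly the duration of the mutation: the
// whole function body is the critical section, and defer releases the lock.
func (s *State) updateHeight(h int64) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.height = h
}

// ProcessBlock performs the slow work without holding the mutex, and delegates
// the short critical section to the modular helper above.
func (s *State) ProcessBlock(h int64) {
	// ... long-running work (verification, I/O) that must not hold the lock ...
	s.updateHeight(h)
}
```
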
# Mantras

* Premature optimization kills.
* Readability is paramount.
* Beautiful is better than fast.
* In the face of ambiguity, refuse the temptation to guess.
* In the face of bugs, refuse the temptation to cover the bug.
* There should be one -- and preferably only one -- obvious way to do it.

README.md
@@ -2,155 +2,139 @@
|
||||
|
||||

|
||||
|
||||
[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
|
||||
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
|
||||
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
|
||||
[Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
|
||||
[Blockchain], for short.
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://pkg.go.dev/github.com/tendermint/tendermint)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/vcExX9T)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
[](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
|
||||
[![Version][version-badge]][version-url]
|
||||
[![API Reference][api-badge]][api-url]
|
||||
[![Go version][go-badge]][go-url]
|
||||
[![Discord chat][discord-badge]][discord-url]
|
||||
[![License][license-badge]][license-url]
|
||||
[![tendermint/tendermint][loc-badge]][loc-url]
|
||||
[![Sourcegraph][sg-badge]][sg-url]
|
||||
|
||||
| Branch | Tests | Coverage | Linting |
|
||||
|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
|
||||
| master |  | [](https://codecov.io/gh/tendermint/tendermint) |  |
|
||||
| Branch | Tests | Coverage | Linting |
|
||||
|--------|-----------------------|------------------------------------------|---------------------|
|
||||
| main | ![Tests][tests-badge] | [![codecov][codecov-badge]][codecov-url] | ![Lint][lint-badge] |
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
|
||||
and securely replicates it on many machines.
|
||||
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a
|
||||
state transition machine - written in any programming language - and securely
|
||||
replicates it on many machines.
|
||||
|
||||
For protocol details, see [the specification](https://github.com/tendermint/spec).
|
||||
For protocol details, refer to the [Tendermint Specification](./spec/README.md).
|
||||
|
||||
For detailed analysis of the consensus protocol, including safety and liveness proofs,
|
||||
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
|
||||
For detailed analysis of the consensus protocol, including safety and liveness
|
||||
proofs, read our paper, "[The latest gossip on BFT
|
||||
consensus](https://arxiv.org/abs/1807.04938)".
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the
|
||||
[website](https://docs.tendermint.com/).
|
||||
|
||||
## Releases
|
||||
|
||||
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
Please do not depend on `main` as your production branch. Use
|
||||
[releases](https://github.com/tendermint/tendermint/releases) instead.
|
||||
|
||||
Tendermint is being used in production in both private and public environments,
|
||||
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
|
||||
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
|
||||
See below for more details about [versioning](#versioning).
|
||||
Tendermint has been used in production in private and public environments, most
notably the blockchains of the Cosmos Network. We haven't released v1.0 yet
since we are making breaking changes to the protocol and the APIs. See below for
more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help. You can
|
||||
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T).
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help.
|
||||
You can contact us [over email](mailto:hello@interchain.io) or [join the
|
||||
chat](https://discord.gg/cosmosnetwork).
|
||||
|
||||
More on how releases are conducted can be found [here](./RELEASES.md).
|
||||
|
||||
## Security
|
||||
|
||||
To report a security vulnerability, see our [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
|
||||
program](https://hackerone.com/cosmos). For examples of the kinds of bugs we're
|
||||
looking for, see [our security policy](SECURITY.md).
|
||||
|
||||
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
|
||||
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
|
||||
We also maintain a dedicated mailing list for security updates. We will only
|
||||
ever use this mailing list to notify you of vulnerabilities and fixes in
|
||||
Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
|
||||
|
||||
## Minimum requirements
|
||||
|
||||
| Requirement | Notes |
|
||||
|-------------|------------------|
|
||||
| Go version | Go1.15 or higher |
|
||||
|
||||
## Documentation
|
||||
|
||||
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
|
||||
| Requirement | Notes |
|
||||
|-------------|-------------------|
|
||||
| Go version | Go 1.18 or higher |
|
||||
|
||||
### Install
|
||||
|
||||
See the [install instructions](/docs/introduction/install.md).
|
||||
See the [install instructions](./docs/introduction/install.md).
|
||||
|
||||
### Quick Start
|
||||
|
||||
- [Single node](/docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
|
||||
- [Join the Cosmos testnet](https://cosmos.network/testnet)
|
||||
- [Single node](./docs/introduction/quick-start.md)
|
||||
- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
|
||||
- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)
|
||||
|
||||
## Contributing
|
||||
|
||||
Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.
|
||||
|
||||
Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
|
||||
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
|
||||
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
|
||||
and familiarize yourself with our
|
||||
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
|
||||
Before contributing to the project, please take a look at the [contributing
|
||||
guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also
|
||||
find it helpful to read the [specifications](./spec/README.md), and familiarize
|
||||
yourself with our [Architectural Decision Records
|
||||
(ADRs)](./docs/architecture/README.md) and
|
||||
[Request For Comments (RFCs)](./docs/rfc/README.md).
|
||||
|
||||
## Versioning
|
||||
|
||||
### Semantic Versioning
|
||||
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
|
||||
According to SemVer, anything in the public API can change at any time before version 1.0.0
|
||||
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and
|
||||
how the version changes. According to SemVer, anything in the public API can
|
||||
change at any time before version 1.0.0.
|
||||
|
||||
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
|
||||
to signal breaking changes across a subset of the total public API. This subset includes all
|
||||
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
|
||||
include the Go APIs.
|
||||
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR
|
||||
version is used to signal breaking changes across Tendermint's API. This API
|
||||
includes all publicly exposed types, functions, and methods in non-internal Go
|
||||
packages as well as the types and methods accessible via the Tendermint RPC
|
||||
interface.
|
||||
|
||||
That said, breaking changes in the following packages will be documented in the
|
||||
CHANGELOG even if they don't lead to MINOR version bumps:
|
||||
|
||||
- crypto
|
||||
- config
|
||||
- libs
|
||||
- bits
|
||||
- bytes
|
||||
- json
|
||||
- log
|
||||
- math
|
||||
- net
|
||||
- os
|
||||
- protoio
|
||||
- rand
|
||||
- sync
|
||||
- strings
|
||||
- service
|
||||
- node
|
||||
- rpc/client
|
||||
- types
|
||||
Breaking changes to these public APIs will be documented in the CHANGELOG.
|
||||
|
||||
### Upgrades
|
||||
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0,
|
||||
we do not guarantee that breaking changes (ie. bumps in the MINOR version)
|
||||
will work with existing Tendermint blockchains. In these cases you will
|
||||
have to start a new blockchain, or write something custom to get the old
|
||||
data into the new chain. However, any bump in the PATCH version should be
|
||||
compatible with existing blockchain histories.
|
||||
|
||||
In an effort to avoid accumulating technical debt prior to 1.0.0, we do not
|
||||
guarantee that breaking changes (ie. bumps in the MINOR version) will work with
|
||||
existing Tendermint blockchains. In these cases you will have to start a new
|
||||
blockchain, or write something custom to get the old data into the new chain.
|
||||
However, any bump in the PATCH version should be compatible with existing
|
||||
blockchain histories.
|
||||
|
||||
For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
### Supported Versions
|
||||
|
||||
Because we are a small core team, we only ship patch updates, including security updates,
|
||||
to the most recent minor release and the second-most recent minor release. Consequently,
|
||||
we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found
|
||||
in [UPGRADING.md](./UPGRADING.md).
|
||||
Because we are a small core team, we only ship patch updates, including security
|
||||
updates, to the most recent minor release and the second-most recent minor
|
||||
release. Consequently, we strongly recommend keeping Tendermint up-to-date.
|
||||
Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).
|
||||
|
||||
## Resources
|
||||
|
||||
### Tendermint Core
|
||||
### Libraries
|
||||
|
||||
For details about the blockchain data structures and the p2p protocols, see the
|
||||
[Tendermint specification](https://docs.tendermint.com/master/spec/).
|
||||
|
||||
For details on using the software, see the [documentation](/docs/) which is also
|
||||
hosted at: <https://docs.tendermint.com/master/>
|
||||
|
||||
### Tools
|
||||
|
||||
Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
|
||||
Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); A framework for building
|
||||
applications in Golang
|
||||
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
|
||||
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)
|
||||
|
||||
### Applications
|
||||
|
||||
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
|
||||
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
|
||||
- [Many more](https://tendermint.com/ecosystem)
|
||||
- [Cosmos Hub](https://hub.cosmos.network/)
|
||||
- [Terra](https://www.terra.money/)
|
||||
- [Celestia](https://celestia.org/)
|
||||
- [Anoma](https://anoma.network/)
|
||||
- [Vocdoni](https://docs.vocdoni.io/)
|
||||
|
||||
### Research
|
||||
|
||||
@@ -163,8 +147,33 @@ Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
## Join us!
|
||||
|
||||
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
|
||||
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)!
|
||||
If you'd like to work full-time on Tendermint Core,
|
||||
[we're hiring](https://interchain-gmbh.breezy.hr/)!
|
||||
|
||||
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
|
||||
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
|
||||
that also maintains [tendermint.com](https://tendermint.com).
|
||||
Funding for Tendermint Core development comes primarily from the
|
||||
[Interchain Foundation](https://interchain.io), a Swiss non-profit. The
|
||||
Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the
|
||||
for-profit entity that also maintains [tendermint.com](https://tendermint.com).
|
||||
|
||||
|
||||
[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
|
||||
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
|
||||
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
|
||||
[version-badge]: https://img.shields.io/github/tag/tendermint/tendermint.svg
|
||||
[version-url]: https://github.com/tendermint/tendermint/releases/latest
|
||||
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
|
||||
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
|
||||
[go-badge]: https://img.shields.io/badge/go-1.17-blue.svg
|
||||
[go-url]: https://github.com/moovweb/gvm
|
||||
[discord-badge]: https://img.shields.io/discord/669268347736686612.svg
|
||||
[discord-url]: https://discord.gg/cosmosnetwork
|
||||
[license-badge]: https://img.shields.io/github/license/tendermint/tendermint.svg
|
||||
[license-url]: https://github.com/tendermint/tendermint/blob/main/LICENSE
|
||||
[loc-badge]: https://tokei.rs/b1/github/tendermint/tendermint?category=lines
|
||||
[loc-url]: https://github.com/tendermint/tendermint
|
||||
[sg-badge]: https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg
|
||||
[sg-url]: https://sourcegraph.com/github.com/tendermint/tendermint?badge
|
||||
[tests-badge]: https://github.com/tendermint/tendermint/workflows/Tests/badge.svg?branch=main
|
||||
[codecov-badge]: https://codecov.io/gh/tendermint/tendermint/branch/main/graph/badge.svg
|
||||
[codecov-url]: https://codecov.io/gh/tendermint/tendermint
|
||||
[lint-badge]: https://github.com/tendermint/tendermint/workflows/Lint/badge.svg
|
||||
|
||||
RELEASES.md (new file)
@@ -0,0 +1,345 @@
|
||||
# Releases
|
||||
|
||||
Tendermint uses modified [semantic versioning](https://semver.org/) with each
|
||||
release following a `vX.Y.Z` format. Tendermint is currently on major version 0
|
||||
and uses the minor version to signal breaking changes. The `main` branch is
|
||||
used for active development and thus it is not advisable to build against it.
|
||||
|
||||
The latest changes are always initially merged into `main`. Releases are
|
||||
specified using tags and are built from long-lived "backport" branches that are
|
||||
cut from `main` when the release process begins. Each release "line" (e.g.
|
||||
0.34 or 0.33) has its own long-lived backport branch, and the backport branches
|
||||
have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder
|
||||
in this case). Tendermint only maintains the last two releases at a time (the
|
||||
oldest release is predominantly just security patches).
|
||||
|
||||
## Backporting
|
||||
|
||||
As non-breaking changes land on `main`, they should also be backported to
|
||||
these backport branches.
|
||||
|
||||
We use Mergify's [backport feature](https://mergify.io/features/backports) to
|
||||
automatically backport to the needed branch. There should be a label for any
|
||||
backport branch that you'll be targeting. To notify the bot to backport a pull
|
||||
request, mark the pull request with the label corresponding to the correct
|
||||
backport branch. For example, to backport to v0.35.x, add the label
|
||||
`S:backport-to-v0.35.x`. Once the original pull request is merged, the bot will
|
||||
try to cherry-pick the pull request to the backport branch. If the bot fails to
|
||||
backport, it will open a pull request. The author of the original pull request
|
||||
is responsible for solving the conflicts and merging the pull request.
|
||||
|
||||
### Creating a backport branch
|
||||
|
||||
If this is the first release candidate for a minor version release, e.g.
|
||||
v0.25.0, you get to have the honor of creating the backport branch!
|
||||
|
||||
Note that, after creating the backport branch, you'll also need to update the
|
||||
tags on `main` so that `go mod` is able to order the branches correctly. You
|
||||
should tag `main` with a "dev" tag that is "greater than" the backport
|
||||
branches tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072)
|
||||
for more context.
|
||||
|
||||
In the following example, we'll assume that we're making a backport branch for
|
||||
the 0.35.x line.
|
||||
|
||||
1. Start on `main`
|
||||
|
||||
2. Create and push the backport branch:
|
||||
```sh
|
||||
git checkout -b v0.35.x
|
||||
git push origin v0.35.x
|
||||
```
|
||||
|
||||
3. Create a PR to update the documentation directory for the backport branch.
|
||||
|
||||
We only maintain RFC and ADR documents on main, to avoid confusion. In
|
||||
addition, we rewrite Markdown URLs pointing to main to point to the
|
||||
backport branch, so that generated documentation will link to the correct
|
||||
versions of files elsewhere in the repository. For context on the latter, see
|
||||
https://github.com/tendermint/tendermint/issues/7675.
|
||||
|
||||
To prepare the PR:
|
||||
```sh
|
||||
# Remove the RFC and ADR documents from the backport.
|
||||
# We only maintain these on main to avoid confusion.
|
||||
git rm -r docs/rfc docs/architecture
|
||||
|
||||
# Update absolute links to point to the backport.
|
||||
go run ./scripts/linkpatch -recur -target v0.35.x -skip-path docs/DOCS_README.md,docs/README.md docs
|
||||
|
||||
# Create and push the PR.
|
||||
git checkout -b update-docs-v035x
|
||||
git commit -m "Update docs for v0.35.x backport branch." docs
|
||||
git push -u origin update-docs-v035x
|
||||
```
|
||||
|
||||
Be sure to merge this PR before making other changes on the newly-created
|
||||
backport branch.
|
||||
|
||||
After doing these steps, go back to `main` and do the following:
|
||||
|
||||
1. Tag `main` as the dev branch for the _next_ minor version release and push
|
||||
it up to GitHub.
|
||||
For example:
|
||||
```sh
|
||||
git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."
|
||||
git push origin v0.36.0-dev
|
||||
```
|
||||
|
||||
2. Create a new workflow to run e2e nightlies for the new backport branch. (See
|
||||
[e2e-nightly-main.yml][e2e] for an example.)
|
||||
|
||||
3. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the
|
||||
backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x`
|
||||
[label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered (see the sketch after this list).
|
||||
|
||||
4. Add a new section to the Dependabot config (`.github/dependabot.yml`) to
|
||||
enable automatic update of Go dependencies on this branch. Copy and edit one
|
||||
of the existing branch configurations to set the correct `target-branch`.
|
||||
|
||||
[e2e]: https://github.com/tendermint/tendermint/blob/main/.github/workflows/e2e-nightly-main.yml
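
As a rough sketch of what such a Mergify section might look like (assuming the
v0.35.x example; check the existing entries in `.github/mergify.yml` for the
exact form this repository uses):

```yaml
pull_request_rules:
  - name: backport patches to v0.35.x branch
    conditions:
      - base=main
      - label=S:backport-to-v0.35.x
    actions:
      backport:
        branches:
          - v0.35.x
```
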
|
||||
|
||||
## Release candidates
|
||||
|
||||
Before creating an official release, especially a minor release, we may want to
|
||||
create a release candidate (RC) for our friends and partners to test out. We use
|
||||
git tags to create RCs, and we build them off of backport branches.
|
||||
|
||||
Tags for RCs should follow the "standard" release naming conventions, with
|
||||
`-rcX` at the end (for example, `v0.35.0-rc0`).
|
||||
|
||||
(Note that branches and tags _cannot_ have the same names, so it's important
|
||||
that these branches have distinct names from the tags/release names.)
|
||||
|
||||
If this is the first RC for a minor release, you'll have to make a new backport
|
||||
branch (see above). Otherwise:
|
||||
|
||||
1. Start from the backport branch (e.g. `v0.35.x`).
|
||||
2. Run the integration tests and the e2e nightlies
|
||||
(which can be triggered from the Github UI;
|
||||
e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
|
||||
3. Prepare the changelog:
|
||||
- Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have
|
||||
its own changelog section. These will be squashed when the final candidate is released.
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary.
|
||||
Check the changelog for breaking changes in these components.
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
4. Open a PR with these changes against the backport branch.
|
||||
5. Once these changes have landed on the backport branch, be sure to pull them back down locally.
|
||||
6. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
|
||||
`git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
|
||||
7. Push the tag back up to origin:
|
||||
`git push origin v0.35.0-rc0`
|
||||
Now the tag should be available on the repo's releases page.
|
||||
8. Future RCs will continue to be built off of this branch.
|
||||
|
||||
Note that this process should only be used for "true" RCs -- release candidates
|
||||
that, if successful, will be the next release. For more experimental "RCs,"
|
||||
create a new, short-lived branch and tag that instead.
|
||||
|
||||
## Minor release
|
||||
|
||||
This minor release process assumes that this release was preceded by release
|
||||
candidates. If there were no release candidates, begin by creating a backport
|
||||
branch, as described above.
|
||||
|
||||
Before performing these steps, be sure the
|
||||
[Minor Release Checklist](#minor-release-checklist) has been completed.
|
||||
|
||||
1. Start on the backport branch (e.g. `v0.35.x`)
|
||||
2. Run integration tests (`make test_integrations`) and the e2e nightlies.
|
||||
3. Prepare the release:
|
||||
- "Squash" changes from the changelog entries for the RCs into a single entry,
|
||||
and add all changes included in `CHANGELOG_PENDING.md`.
|
||||
(Squashing includes both combining all entries, as well as removing or simplifying
|
||||
any intra-RC changes. It may also help to alphabetize the entries by package name.)
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump TMVersionDefault version in `version.go`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
4. Open a PR with these changes against the backport branch.
|
||||
5. Once these changes are on the backport branch, push a tag with prepared release details.
|
||||
This will trigger the actual release `v0.35.0`.
|
||||
- `git tag -a v0.35.0 -m 'Release v0.35.0'`
|
||||
- `git push origin v0.35.0`
|
||||
6. Make sure that `main` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
||||
7. Add the release to the documentation site generator config (see
|
||||
[DOCS_README.md](./docs/DOCS_README.md) for more details). In summary:
|
||||
- Start on branch `main`.
|
||||
- Add a new line at the bottom of [`docs/versions`](./docs/versions) to
|
||||
ensure the newest release is the default for the landing page.
|
||||
- Add a new entry to `themeConfig.versions` in
|
||||
[`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the
|
||||
release in the dropdown versions menu.
|
||||
- Commit these changes to `main` and backport them into the backport
|
||||
branch for this release.
|
||||
|
||||
## Patch release
|
||||
|
||||
Patch releases are done differently from minor releases: They are built off of
|
||||
long-lived backport branches, rather than from main. As non-breaking changes
|
||||
land on `main`, they should also be backported into these backport branches.
|
||||
|
||||
Patch releases don't have release candidates by default, although any tricky
|
||||
changes may merit a release candidate.
|
||||
|
||||
To create a patch release:
|
||||
|
||||
1. Checkout the long-lived backport branch: `git checkout v0.35.x`
|
||||
2. Run integration tests (`make test_integrations`) and the nightlies.
|
||||
3. Check out a new branch and prepare the release:
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump the TMVersionDefault in `version.go`
|
||||
- Bump the ABCI version number, if necessary.
|
||||
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
||||
which can change during patch releases, and only field additions are valid patch changes.)
|
||||
4. Open a PR with these changes that will land them back on `v0.35.x`
|
||||
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
||||
- `git tag -a v0.35.1 -m 'Release v0.35.1'`
|
||||
- `git push origin v0.35.1`
|
||||
6. Create a pull request back to main with the CHANGELOG & version changes from the latest release.
|
||||
- Remove all `R:patch` labels from the pull requests that were included in the release.
|
||||
- Do not merge the backport branch into main.
|
||||
|
||||
## Minor Release Checklist
|
||||
|
||||
The following set of steps are performed on all releases that increment the
|
||||
_minor_ version, e.g. v0.25 to v0.26. These steps ensure that Tendermint is well
|
||||
tested, stable, and suitable for adoption by the various diverse projects that
|
||||
rely on Tendermint.
|
||||
|
||||
### Feature Freeze
|
||||
|
||||
Ahead of any minor version release of Tendermint, the software enters 'Feature
|
||||
Freeze' for at least two weeks. A feature freeze means that _no_ new features
|
||||
are added to the code being prepared for release. No code changes should be made
|
||||
to the code being released that do not directly improve pressing issues of code
|
||||
quality. The following must not be merged during a feature freeze:
|
||||
|
||||
* Refactors that are not related to specific bug fixes.
|
||||
* Dependency upgrades.
|
||||
* New test code that does not test a discovered regression.
|
||||
* New features of any kind.
|
||||
* Documentation or spec improvements that are not related to the newly developed
|
||||
code.
|
||||
|
||||
This period directly follows the creation of the [backport
|
||||
branch](#creating-a-backport-branch). The Tendermint team instead directs all
|
||||
attention to ensuring that the existing code is stable and reliable. Broken
|
||||
tests are fixed, flaky tests are remedied, end-to-end test failures are
|
||||
thoroughly diagnosed and all efforts of the team are aimed at improving the
|
||||
quality of the code. During this period, the upgrade harness tests are run
|
||||
repeatedly and a variety of in-house testnets are run to ensure Tendermint
|
||||
functions at the scale it will be used by application developers and node
|
||||
operators.
|
||||
|
||||
### Nightly End-To-End Tests
|
||||
|
||||
The Tendermint team maintains [a set of end-to-end
|
||||
tests](https://github.com/tendermint/tendermint/blob/main/test/e2e/README.md#L1)
|
||||
that run each night on the latest commit of the project and on the code in the
|
||||
tip of each supported backport branch. These tests start a network of
|
||||
containerized Tendermint processes and run automated checks that the network
|
||||
functions as expected in both stable and unstable conditions. During the feature
|
||||
freeze, these tests are run nightly and must pass consistently for a release of
|
||||
Tendermint to be considered stable.
|
||||
|
||||
### Upgrade Harness
|
||||
|
||||
> TODO(williambanfield): Change to past tense and clarify this section once
|
||||
> upgrade harness is complete.
|
||||
|
||||
The Tendermint team is creating an upgrade test harness to exercise the workflow
|
||||
of stopping an instance of Tendermint running one version of the software and
|
||||
starting up the same application running the next version. To support upgrade
|
||||
testing, we will add the ability to terminate the Tendermint process at specific
|
||||
pre-defined points in its execution so that we can verify upgrades work in a
|
||||
representative sample of stop conditions.
|
||||
|
||||
### Large Scale Testnets
|
||||
|
||||
The Tendermint end-to-end tests run a small network (~10s of nodes) to exercise
|
||||
basic consensus interactions. Real world deployments of Tendermint often have
|
||||
over a hundred nodes just in the validator set, with many others acting as full
|
||||
nodes and sentry nodes. To gain more assurance before a release, we will also
|
||||
run larger-scale test networks to shake out emergent behaviors at scale.
|
||||
|
||||
Large-scale test networks are run on a set of virtual machines (VMs). Each VM is
|
||||
equipped with 4 Gigabytes of RAM and 2 CPU cores. The network runs a very simple
|
||||
key-value store application. The application adds artificial delays to different
|
||||
ABCI calls to simulate a slow application. Each testnet is briefly run with no
|
||||
load being generated to collect a baseline performance. Once baseline is
|
||||
captured, a consistent load is applied across the network. This load takes the
|
||||
form of 10% of the running nodes all receiving a consistent stream of two
|
||||
hundred transactions per minute each.
|
||||
|
||||
During each testnet, the following metrics are monitored and collected on each
|
||||
node:
|
||||
|
||||
* Consensus rounds per height
|
||||
* Maximum connected peers, Minimum connected peers, Rate of change of peer connections
|
||||
* Memory resident set size
|
||||
* CPU utilization
|
||||
* Blocks produced per minute
|
||||
* Seconds for each step of consensus (Propose, Prevote, Precommit, Commit)
|
||||
* Latency to receive block proposals
|
||||
|
||||
For these tests we intentionally target low-powered host machines (with low core
|
||||
counts and limited memory) to ensure we observe similar kinds of resource contention
|
||||
and limitation that real-world deployments of Tendermint experience in production.
|
||||
|
||||
#### 200 Node Testnet
|
||||
|
||||
To test the stability and performance of Tendermint in a real world scenario,
|
||||
a 200 node test network is run. The network comprises 5 seed nodes, 100
|
||||
validators and 95 non-validating full nodes. All nodes begin by dialing
|
||||
a subset of the seed nodes to discover peers. The network is run for several
|
||||
days, with metrics being collected continuously. In cases of changes to performance
|
||||
critical systems, testnets of larger sizes should be considered.
|
||||
|
||||
#### Rotating Node Testnet
|
||||
|
||||
Real-world deployments of Tendermint frequently see new nodes arrive and old
|
||||
nodes exit the network. The rotating node testnet ensures that Tendermint is
|
||||
able to handle this reliably. In this test, a network with 10 validators and
|
||||
3 seed nodes is started. A rolling set of 25 full nodes are started and each
|
||||
connects to the network by dialing one of the seed nodes. Once the node is able
|
||||
to blocksync to the head of the chain and begins producing blocks using
|
||||
Tendermint consensus, it is stopped. Once stopped, a new node is started and
|
||||
takes its place. This network is run for several days.
|
||||
|
||||
#### Network Partition Testnet
|
||||
|
||||
Tendermint is expected to recover from network partitions. A partition where no
|
||||
subset of the nodes is left with the super-majority of the stake is expected to
|
||||
stop making blocks. Upon alleviation of the partition, the network is expected
|
||||
to once again become fully connected and capable of producing blocks. The
|
||||
network partition testnet ensures that Tendermint is able to handle this
|
||||
reliably at scale. In this test, a network with 100 validators and 95 full
|
||||
nodes is started. All validators have equal stake. Once the network is
|
||||
producing blocks, a set of firewall rules is deployed to create a partitioned
|
||||
network with 50% of the stake on one side and 50% on the other. Once the
|
||||
network stops producing blocks, the firewall rules are removed and the nodes
|
||||
are monitored to ensure they reconnect and that the network again begins
|
||||
producing blocks.
|
||||
|
||||
#### Absent Stake Testnet
|
||||
|
||||
Tendermint networks often run with _some_ portion of the voting power offline.
|
||||
The absent stake testnet ensures that large networks are able to handle this
|
||||
reliably. A set of 150 validator nodes and three seed nodes is started. The set
|
||||
of 150 validators is configured to only possess a cumulative stake of 67% of
|
||||
the total stake. The remaining 33% of the stake is configured to belong to
|
||||
a validator that is never actually run in the test network. The network is run
|
||||
for multiple days, ensuring that it is able to produce blocks without issue.
|
||||
SECURITY.md
@@ -2,98 +2,146 @@
|
||||
|
||||
## Reporting a Bug
|
||||
|
||||
As part of our [Coordinated Vulnerability Disclosure
|
||||
Policy](https://tendermint.com/security), we operate a [bug
|
||||
bounty](https://hackerone.com/tendermint).
|
||||
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.
|
||||
As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security),
|
||||
we operate a [bug bounty][hackerone]. See the policy for more
|
||||
details on submissions and rewards, and see "Example Vulnerabilities" (below)
|
||||
for examples of the kinds of bugs we're most interested in.
|
||||
|
||||
### Guidelines
|
||||
|
||||
|
||||
We require that all researchers:
|
||||
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting
|
||||
vulnerability information in public places, including Github Issues, Discord
|
||||
channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience,
|
||||
disruption to production systems (including but not limited to the Cosmos
|
||||
Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential
|
||||
between yourself and the Tendermint Core engineering team until the issue has
|
||||
been resolved and disclosed
|
||||
* Avoid posting personally identifiable information, privately or publicly
|
||||
|
||||
If you follow these guidelines when reporting an issue to us, we commit to:
|
||||
|
||||
* Not pursue or support any legal action related to your research on this vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
|
||||
* Not pursue or support any legal action related to your research on this
|
||||
vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a
|
||||
timely fashion
|
||||
|
||||
## Disclosure Process
|
||||
|
||||
|
||||
Tendermint Core uses the following disclosure process:
|
||||
|
||||
1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
|
||||
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.
|
||||
1. Once a security report is received, the Tendermint Core team works to verify
|
||||
the issue and confirm its severity level using CVSS.
|
||||
2. The Tendermint Core team collaborates with the Gaia team to determine the
|
||||
vulnerability’s potential impact on the Cosmos Hub.
|
||||
3. Patches are prepared for eligible releases of Tendermint in private
|
||||
repositories. See “Supported Releases” below for more information on which
|
||||
releases are considered eligible.
|
||||
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE
|
||||
Numbering Authority.
|
||||
5. We notify the community that a security release is coming, to give users time
|
||||
to prepare their systems for the update. Notifications can include forum
|
||||
posts, tweets, and emails to partners and validators, including emails sent
|
||||
to the [Tendermint Security Mailing List][tmsec-mailing].
|
||||
6. 24 hours following this notification, the fixes are applied publicly and new
|
||||
releases are issued.
|
||||
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these
|
||||
releases, and then themselves issue new releases.
|
||||
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we
|
||||
notify the community, again, through the same channels as above. We also
|
||||
publish a Security Advisory on Github and publish the CVE, as long as neither
|
||||
the Security Advisory nor the CVE include any information on how to exploit
|
||||
these vulnerabilities beyond what information is already available in the
|
||||
patch itself.
|
||||
9. Once the community is notified, we will pay out any relevant bug bounties to
|
||||
submitters.
|
||||
10. One week after the releases go out, we will publish a post with further
|
||||
details on the vulnerability as well as our response to it.
|
||||
|
||||
This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible, however it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
This process can take some time. Every effort will be made to handle the bug in
|
||||
as timely a manner as possible, however it's important that we follow the
|
||||
process described above to ensure that disclosures are handled consistently and
|
||||
to keep Tendermint Core and its downstream dependent projects--including but not
|
||||
limited to Gaia and the Cosmos Hub--as secure as possible.
|
||||
|
||||
### Example Timeline
|
||||
|
||||
|
||||
The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.
|
||||
The following is an example timeline for the triage and response. The required
|
||||
roles and team members are described in parentheses after each task; however,
|
||||
multiple people can play each role and each person may play multiple roles.
|
||||
|
||||
#### > 24 Hours Before Release Time
|
||||
#### 24+ Hours Before Release Time
|
||||
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG)
|
||||
4. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
1. Request CVE number (ADMIN)
|
||||
2. Gather emails and other contact info for validators (COMMS LEAD)
|
||||
3. Create patches in a private security repo, and ensure that PRs are open
|
||||
targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
|
||||
5. Write “Security Advisory” for forum (TENDERMINT LEAD)
|
||||
|
||||
#### 24 Hours Before Release Time
|
||||
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
|
||||
2. Post Tweet linking to forum post (COMMS LEAD)
|
||||
3. Announce security advisory/link to post in various other social channels
|
||||
(Telegram, Discord) (COMMS LEAD)
|
||||
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
|
||||
|
||||
#### Release Time
|
||||
|
||||
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT
|
||||
LEAD)
|
||||
2. Cut Cosmos SDK release for eligible versions (COSMOS ENG)
|
||||
3. Cut Gaia release for eligible versions (GAIA ENG)
|
||||
4. Post “Security releases” on forum (TENDERMINT LEAD)
|
||||
5. Post new Tweet linking to forum post (COMMS LEAD)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)
|
||||
6. Remind everyone via social channels (Telegram, Discord) that the release is
|
||||
out (COMMS LEAD)
|
||||
7. Send emails to validators or other users (COMMS LEAD)
|
||||
8. Publish Security Advisory and CVE, if CVE has no sensitive information
|
||||
(ADMIN)
|
||||
|
||||
#### After Release Time
|
||||
|
||||
1. Write forum post with exploit details (TENDERMINT LEAD)
|
||||
2. Approve pay-out on HackerOne for submitter (ADMIN)
|
||||
|
||||
|
||||
#### 7 Days After Release Time
|
||||
|
||||
1. Publish CVE if it has not yet been published (ADMIN)
|
||||
|
||||
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)
|
||||
|
||||
## Supported Releases
|
||||
|
||||
The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well for the major/minor release that the Cosmos Hub is running.
|
||||
The Tendermint Core team commits to releasing security patch releases for both
|
||||
the latest minor release as well for the major/minor release that the Cosmos Hub
|
||||
is running.
|
||||
|
||||
If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
|
||||
If you are running older versions of Tendermint Core, we encourage you to
|
||||
upgrade at your earliest opportunity so that you can receive security patches
|
||||
directly from the Tendermint repo. While you are welcome to backport security
|
||||
patches to older versions for your own use, we will not publish or promote these
|
||||
backports.
|
||||
|
||||
## Scope
|
||||
|
||||
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
|
||||
The full scope of our bug bounty program is outlined on our
|
||||
[Hacker One program page][hackerone]. Please also note that, in the interest of
|
||||
the safety of our users and staff, a few things are explicitly excluded from
|
||||
scope:
|
||||
|
||||
* Any third-party services
|
||||
* Findings from physical testing, such as office access
|
||||
|
||||
* Findings derived from social engineering (e.g., phishing)
|
||||
|
||||
## Example Vulnerabilities
|
||||
|
||||
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!
|
||||
The following is a list of examples of the kinds of vulnerabilities that we’re
|
||||
most interested in. It is not exhaustive: there are other kinds of issues we may
|
||||
also be interested in!
|
||||
|
||||
### Specification
|
||||
|
||||
@@ -105,7 +153,8 @@ The following is a list of examples of the kinds of vulnerabilities that we’re
|
||||
|
||||
Assuming less than 1/3 of the voting power is Byzantine (malicious):
|
||||
|
||||
* Validation of blockchain data structures, including blocks, block parts, votes, and so on
|
||||
* Validation of blockchain data structures, including blocks, block parts,
|
||||
votes, and so on
|
||||
* Execution of blocks
|
||||
* Validator set changes
|
||||
* Proposer round robin
|
||||
@@ -114,6 +163,9 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):
|
||||
* A node halting (liveness failure)
|
||||
* Syncing new and old nodes
|
||||
|
||||
Assuming more than 1/3 the voting power is Byzantine:
|
||||
|
||||
* Attacks that go unpunished (unhandled evidence)
|
||||
|
||||
### Networking
|
||||
|
||||
@@ -139,7 +191,7 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
### Libraries
|
||||
|
||||
* Serialization (Amino)
|
||||
* Serialization
|
||||
* Reading/Writing files and databases
|
||||
|
||||
### Cryptography
|
||||
@@ -150,5 +202,8 @@ Attacks may come through the P2P network or the RPC layer:
|
||||
|
||||
### Light Client
|
||||
|
||||
* Core verification
|
||||
|
||||
* Bisection/sequential algorithms
|
||||
|
||||
[hackerone]: https://hackerone.com/cosmos
|
||||
[tmsec-mailing]: https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc
|
||||
|
||||
UPGRADING.md
@@ -1,37 +1,37 @@
|
||||
# Upgrading Tendermint Core
|
||||
|
||||
This guide provides instructions for upgrading to specific versions of Tendermint Core.
|
||||
This guide provides instructions for upgrading to specific versions of
|
||||
Tendermint Core.
|
||||
|
||||
## Unreleased
|
||||
## v0.37 (Unreleased)
|
||||
|
||||
This version requires a coordinated network upgrade. It alters the elements in
|
||||
the predigest of the `LastResultsHash` and thus all nodes must upgrade together
|
||||
(see [\#9175](https://github.com/tendermint/tendermint/pull/9175)).
|
||||
|
||||
NOTE: v0.35 was recalled and v0.36 was skipped
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid errors caused by unimplemented changes.
|
||||
* In v0.34, messages on the wire used to be length-delimited with `int64` varint
|
||||
values, which was inconsistent with the `uint64` varint length delimiters used
|
||||
in the P2P layer. Both now consistently use `uint64` varint length delimiters.
|
||||
|
||||
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in early ABCI implementations.
|
||||
## v0.34.20
|
||||
|
||||
* Messages are written to a byte stream using uint64 length delimiters instead of int64.
|
||||
### Feature: Priority Mempool
|
||||
|
||||
### Config Changes
|
||||
This release backports an implementation of the Priority Mempool from the v0.35
|
||||
branch. This implementation of the mempool permits the application to set a
|
||||
priority on each transaction during CheckTx, and during block selection the
|
||||
highest-priority transactions are chosen (subject to the constraints on size
|
||||
and gas cost).
|
||||
|
||||
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
|
||||
Operators can enable the priority mempool by setting `mempool.version` to
|
||||
`"v1"` in the `config.toml`. For more technical details about the priority
|
||||
mempool, see [ADR 067: Mempool
|
||||
Refactor](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-067-mempool-refactor.md).
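
For example, the relevant `config.toml` stanza looks roughly like this (other
mempool settings omitted):

```toml
[mempool]
version = "v1"
```
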
|
||||
|
||||
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
|
||||
you have updated all the variables in your `config.toml` file.
|
||||
|
||||
### CLI Changes
|
||||
|
||||
* If you had previously used `tendermint gen_node_key` to generate a new node
|
||||
key, keep in mind that it no longer saves the output to a file. You can use
|
||||
`tendermint init` or pipe the output of `tendermint gen_node_key` to
|
||||
`$TMHOME/config/node_key.json`:
|
||||
|
||||
```
|
||||
$ tendermint gen_node_key > $TMHOME/config/node_key.json
|
||||
```
|
||||
|
||||
* CLI commands and flags are all now hyphen-case instead of snake_case.
|
||||
Make sure to adjust any scripts that call a CLI command using snake_case.
|
||||
## v0.34.0
|
||||
|
||||
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
|
||||
@@ -72,8 +72,6 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.
|
||||
* The field `Proof`, on the ABCI type `ResponseQuery`, is now named `ProofOps`.
|
||||
For more, see "Crypto," below.
|
||||
|
||||
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in early ABCI implementations.
|
||||
|
||||
### P2P Protocol
|
||||
|
||||
The default codec is now proto3, not amino. The schema files can be found in the `/proto`
|
||||
@@ -81,9 +79,12 @@ directory. For more, see "Protobuf," below.
|
||||
|
||||
### Blockchain Protocol
|
||||
|
||||
* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
|
||||
`ResponseDeliverTx(Code, Data)` as of v0.34 also includes `GasWanted` and `GasUsed`
|
||||
fields.
|
||||
* `Header#LastResultsHash` previously was the root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data)` responses.
|
||||
As of 0.34,`Header#LastResultsHash` is now the root hash of a Merkle tree built from:
|
||||
* `BeginBlock#Events`
|
||||
* Root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data,
|
||||
GasWanted, GasUsed, Events)` responses
|
||||
* `BeginBlock#Events`
|
||||
|
||||
* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
|
||||
to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).
|
||||
@@ -187,7 +188,6 @@ Other user-relevant changes include:

* The `Verifier` was broken up into two pieces:
  * Core verification logic (pure `VerifyX` functions)
  * `Client` object, which represents the complete light client
* The new light client stores headers & validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.

@@ -452,7 +452,7 @@ use `make build_c` / `make install_c` (full instructions can be found at

## v0.31.0

This release contains a breaking change to the behavior of the pubsub system.
It also contains some minor breaking changes in the Go API and ABCI.
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
with blockchains created from the v0.30 series.

@@ -470,7 +470,7 @@ In this case, the WS client will receive an error with description:

    "error": {
        "code": -32000,
        "msg": "Server error",
        "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
    }
}

@@ -651,7 +651,7 @@ to `timeout_propose = "3s"`.

### RPC Changes

The default behavior of `/abci_query` has been changed to not return a proof,
and the name of the parameter that controls this has been changed from `trusted`
to `prove`. To get proofs with your queries, ensure you set `prove=true` (see
the example below).

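For example, a proof-returning query against a local node now looks something
like this (default RPC port assumed; data shown as a quoted string, as in the
RPC docs):

```sh
curl 'http://localhost:26657/abci_query?path=""&data="abcd"&prove=true'
```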
Vagrantfile (vendored, new file, 66 lines)
@@ -0,0 +1,66 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/focal64"

  config.vm.provider "virtualbox" do |v|
    v.memory = 4096
    v.cpus = 2
  end

  config.vm.provision "shell", inline: <<-SHELL
    apt-get update

    # install base requirements
    apt-get install -y --no-install-recommends wget curl jq zip \
      make shellcheck bsdmainutils psmisc
    apt-get install -y language-pack-en

    # install docker
    apt-get install -y --no-install-recommends apt-transport-https \
      ca-certificates \
      curl \
      gnupg-agent \
      software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
    add-apt-repository \
      "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
      $(lsb_release -cs) \
      stable"
    apt-get update
    apt-get install -y docker-ce
    usermod -aG docker vagrant

    # install go
    wget -q https://dl.google.com/go/go1.15.linux-amd64.tar.gz
    tar -xvf go1.15.linux-amd64.tar.gz
    mv go /usr/local
    rm -f go1.15.linux-amd64.tar.gz

    # install nodejs (for docs)
    curl -sL https://deb.nodesource.com/setup_11.x | bash -
    apt-get install -y nodejs

    # cleanup
    apt-get autoremove -y

    # set env variables
    echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
    echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
    echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
    echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
    echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile

    mkdir -p /home/vagrant/go/bin
    mkdir -p /home/vagrant/go/src/github.com/tendermint
    ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint

    chown -R vagrant:vagrant /home/vagrant/go
    chown vagrant:vagrant /home/vagrant/.bash_profile

    # get all deps and tools, ready to install/test
    su - vagrant -c 'source /home/vagrant/.bash_profile'
    su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools'
  SHELL
end
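A typical workflow with this Vagrantfile, assuming Vagrant and VirtualBox are
installed on the host, would be:

```sh
vagrant up    # provisions Ubuntu 20.04 with Docker, Go 1.15, and Node.js
vagrant ssh   # the login shell changes into the tendermint source tree
```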
@@ -20,7 +20,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g

A detailed description of the ABCI methods and message types is contained in:

- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
- [A protobuf file](../proto/tendermint/abci/types.proto)
- [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)

## Protocol Buffers

@@ -1,7 +1,6 @@
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
@@ -15,53 +14,48 @@ const (
|
||||
echoRetryIntervalSeconds = 1
|
||||
)
|
||||
|
||||
//go:generate mockery --case underscore --name Client
|
||||
|
||||
// Client defines an interface for an ABCI client.
|
||||
//
|
||||
// All `Async` methods return a `ReqRes` object and an error.
|
||||
// All `Async` methods return a `ReqRes` object.
|
||||
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
|
||||
//
|
||||
// NOTE these are client errors, eg. ABCI socket connectivity issues.
|
||||
// Application-related errors are reflected in response via ABCI error codes
|
||||
// and logs.
|
||||
// Note these are client errors, eg. ABCI socket connectivity issues.
|
||||
// Application-related errors are reflected in response via ABCI error codes and logs.
|
||||
type Client interface {
|
||||
service.Service
|
||||
|
||||
SetResponseCallback(Callback)
|
||||
Error() error
|
||||
|
||||
// Asynchronous requests
|
||||
FlushAsync(context.Context) (*ReqRes, error)
|
||||
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
|
||||
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
|
||||
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
|
||||
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
|
||||
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
|
||||
CommitAsync(context.Context) (*ReqRes, error)
|
||||
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
|
||||
BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
|
||||
EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
|
||||
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
|
||||
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
|
||||
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
|
||||
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
|
||||
FlushAsync() *ReqRes
|
||||
EchoAsync(msg string) *ReqRes
|
||||
InfoAsync(types.RequestInfo) *ReqRes
|
||||
SetOptionAsync(types.RequestSetOption) *ReqRes
|
||||
DeliverTxAsync(types.RequestDeliverTx) *ReqRes
|
||||
CheckTxAsync(types.RequestCheckTx) *ReqRes
|
||||
QueryAsync(types.RequestQuery) *ReqRes
|
||||
CommitAsync() *ReqRes
|
||||
InitChainAsync(types.RequestInitChain) *ReqRes
|
||||
BeginBlockAsync(types.RequestBeginBlock) *ReqRes
|
||||
EndBlockAsync(types.RequestEndBlock) *ReqRes
|
||||
ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes
|
||||
OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
|
||||
LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
|
||||
ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes
|
||||
|
||||
// Synchronous requests
|
||||
FlushSync(context.Context) error
|
||||
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
|
||||
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
|
||||
DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
|
||||
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
|
||||
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
|
||||
CommitSync(context.Context) (*types.ResponseCommit, error)
|
||||
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
|
||||
BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
|
||||
EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
|
||||
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
||||
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
||||
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
||||
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
||||
FlushSync() error
|
||||
EchoSync(msg string) (*types.ResponseEcho, error)
|
||||
InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
|
||||
SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
|
||||
DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
|
||||
CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
|
||||
QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
|
||||
CommitSync() (*types.ResponseCommit, error)
|
||||
InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
|
||||
BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
|
||||
EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
|
||||
ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
||||
OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
||||
LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
||||
ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
||||
}
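For orientation, the following is a minimal sketch (not part of this diff) of
driving the Sync side of the interface in-process. It assumes the context-free
signatures listed just above, and that the package's historical helpers
(`NewLocalClient` and the example `kvstore` application) are available with
their usual shapes:

```go
package main

import (
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Wrap an in-process ABCI application in the local client; passing nil
	// lets the client allocate its own mutex.
	client := abcicli.NewLocalClient(nil, kvstore.NewApplication())
	if err := client.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = client.Stop() }()

	// Sync calls return the protobuf response plus a client-level error.
	res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: []byte("key=value")})
	if err != nil {
		panic(err)
	}
	fmt.Println("DeliverTx code:", res.Code)
}
```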
|
||||
|
||||
//----------------------------------------
|
||||
@@ -80,20 +74,22 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err
|
||||
return
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
type Callback func(*types.Request, *types.Response)
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
type ReqRes struct {
|
||||
*types.Request
|
||||
*sync.WaitGroup
|
||||
*types.Response // Not set atomically, so be sure to use WaitGroup.
|
||||
|
||||
mtx tmsync.Mutex
|
||||
done bool // Gets set to true once *after* WaitGroup.Done().
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
mtx tmsync.Mutex
|
||||
|
||||
// callbackInvoked tracks whether the callback has already been invoked during
// the regular execution of the request. This allows clients to set the
// callback concurrently without accidentally invoking it twice, once when
// SetCallback is called and once during the normal request flow.
callbackInvoked bool
|
||||
cb func(*types.Response) // A single callback that may be set.
|
||||
}
|
||||
|
||||
func NewReqRes(req *types.Request) *ReqRes {
|
||||
@@ -102,39 +98,49 @@ func NewReqRes(req *types.Request) *ReqRes {
|
||||
WaitGroup: waitGroup1(),
|
||||
Response: nil,
|
||||
|
||||
done: false,
|
||||
cb: nil,
|
||||
callbackInvoked: false,
|
||||
cb: nil,
|
||||
}
|
||||
}
|
||||
|
||||
// Sets the callback for this ReqRes atomically.
|
||||
// If reqRes is already done, calls cb immediately.
|
||||
// NOTE: reqRes.cb should not change if reqRes.done.
|
||||
// NOTE: only one callback is supported.
|
||||
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
|
||||
reqRes.mtx.Lock()
|
||||
// SetCallback sets the callback. If the response has already arrived, it calls
// cb immediately. Note, reqRes.cb should not change once the callback has been
// invoked, and only one callback is supported.
|
||||
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
|
||||
r.mtx.Lock()
|
||||
|
||||
if reqRes.done {
|
||||
reqRes.mtx.Unlock()
|
||||
cb(reqRes.Response)
|
||||
if r.callbackInvoked {
|
||||
r.mtx.Unlock()
|
||||
cb(r.Response)
|
||||
return
|
||||
}
|
||||
|
||||
reqRes.cb = cb
|
||||
reqRes.mtx.Unlock()
|
||||
r.cb = cb
|
||||
r.mtx.Unlock()
|
||||
}
|
||||
|
||||
func (reqRes *ReqRes) GetCallback() func(*types.Response) {
|
||||
reqRes.mtx.Lock()
|
||||
defer reqRes.mtx.Unlock()
|
||||
return reqRes.cb
|
||||
// InvokeCallback executes the configured callback, if non-nil, in a
// thread-safe manner.
func (r *ReqRes) InvokeCallback() {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
|
||||
if r.cb != nil {
|
||||
r.cb(r.Response)
|
||||
}
|
||||
r.callbackInvoked = true
|
||||
}
|
||||
|
||||
// NOTE: it should be safe to read reqRes.cb without locks after this.
|
||||
func (reqRes *ReqRes) SetDone() {
|
||||
reqRes.mtx.Lock()
|
||||
reqRes.done = true
|
||||
reqRes.mtx.Unlock()
|
||||
// GetCallback returns the configured callback of the ReqRes object which may be
|
||||
// nil. Note, it is not safe to concurrently call this in cases where it is
|
||||
// marked done and SetCallback is called before calling GetCallback as that
|
||||
// will invoke the callback twice and create a potential race condition.
|
||||
//
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5439
|
||||
func (r *ReqRes) GetCallback() func(*types.Response) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
return r.cb
|
||||
}
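To make the callback flow above concrete, a hypothetical consumer of an Async
call could look like the fragment below (client setup omitted; signatures
follow the context-free variant in this diff):

```go
// reqRes is returned immediately; the response may or may not have arrived yet.
reqRes := client.CheckTxAsync(types.RequestCheckTx{Tx: tx})

reqRes.SetCallback(func(res *types.Response) {
	// Runs right away if the response already arrived (callbackInvoked is true),
	// otherwise when the client later calls InvokeCallback.
	fmt.Println("CheckTx code:", res.GetCheckTx().Code)
})
```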
|
||||
|
||||
func waitGroup1() (wg *sync.WaitGroup) {
|
||||
|
||||
@@ -1,29 +0,0 @@

// Package abcicli provides an ABCI implementation in Go.
//
// There are 3 clients available:
// 1. socket (unix or TCP)
// 2. local (in memory)
// 3. gRPC
//
// ## Socket client
//
// async: the client maintains an internal buffer of a fixed size. when the
// buffer becomes full, all Async calls will return an error immediately.
//
// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the
// Flush requests 3) waiting for the Flush response
//
// ## Local client
//
// async: global mutex is locked during each call (meaning it's not really async!)
// sync: global mutex is locked during each call
//
// ## gRPC client
//
// async: gRPC is synchronous, but an internal buffer of a fixed size is used
// to store responses and later call callbacks (separate goroutine per
// response).
//
// sync: waits for all Async calls to complete (essentially what Flush does in
// the socket client) and calls Sync method.
package abcicli
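Choosing between these three clients goes through the package constructor shown
earlier in this diff (`NewClient`); a small sketch, with the address and the
`mustConnect` flag purely illustrative:

```go
package main

import (
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

func main() {
	// "socket" and "grpc" are the transports the constructor has historically accepted.
	cli, err := abcicli.NewClient("tcp://127.0.0.1:26658", "socket", true)
	if err != nil {
		log.Fatal(err)
	}
	if err := cli.Start(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = cli.Stop() }()
}
```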
|
||||
@@ -1,12 +1,12 @@
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
@@ -15,7 +15,10 @@ import (
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
// A gRPC client.
|
||||
var _ Client = (*grpcClient)(nil)
|
||||
|
||||
// A stripped copy of the remoteClient that makes
|
||||
// synchronous calls using grpc
|
||||
type grpcClient struct {
|
||||
service.BaseService
|
||||
mustConnect bool
|
||||
@@ -30,18 +33,6 @@ type grpcClient struct {
|
||||
resCb func(*types.Request, *types.Response) // listens to all callbacks
|
||||
}
|
||||
|
||||
var _ Client = (*grpcClient)(nil)
|
||||
|
||||
// NewGRPCClient creates a gRPC client, which will connect to addr upon the
|
||||
// start. Note Client#Start returns an error if connection is unsuccessful and
|
||||
// mustConnect is true.
|
||||
//
|
||||
// GRPC calls are synchronous, but some callbacks expect to be called
|
||||
// asynchronously (eg. the mempool expects to be able to lock to remove bad txs
|
||||
// from cache). To accommodate, we finish each call in its own go-routine,
|
||||
// which is expensive, but easy - if you want something better, use the socket
|
||||
// protocol! maybe one day, if people really want it, we use grpc streams, but
|
||||
// hopefully not :D
|
||||
func NewGRPCClient(addr string, mustConnect bool) Client {
|
||||
cli := &grpcClient{
|
||||
addr: addr,
|
||||
@@ -63,6 +54,10 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OnStart() error {
|
||||
if err := cli.BaseService.OnStart(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This processes asynchronous request/response messages and dispatches
|
||||
// them to callbacks.
|
||||
go func() {
|
||||
@@ -71,7 +66,6 @@ func (cli *grpcClient) OnStart() error {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
reqres.SetDone()
|
||||
reqres.Done()
|
||||
|
||||
// Notify client listener if set
|
||||
@@ -80,9 +74,7 @@ func (cli *grpcClient) OnStart() error {
|
||||
}
|
||||
|
||||
// Notify reqRes listener if set
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(reqres.Response)
|
||||
}
|
||||
reqres.InvokeCallback()
|
||||
}
|
||||
for reqres := range cli.chReqRes {
|
||||
if reqres != nil {
|
||||
@@ -95,6 +87,7 @@ func (cli *grpcClient) OnStart() error {
|
||||
|
||||
RETRY_LOOP:
|
||||
for {
|
||||
//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
|
||||
conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
if err != nil {
|
||||
if cli.mustConnect {
|
||||
@@ -125,6 +118,8 @@ RETRY_LOOP:
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OnStop() {
|
||||
cli.BaseService.OnStop()
|
||||
|
||||
if cli.conn != nil {
|
||||
cli.conn.Close()
|
||||
}
|
||||
@@ -163,168 +158,155 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// GRPC calls are synchronous, but some callbacks expect to be called asynchronously
|
||||
// (eg. the mempool expects to be able to lock to remove bad txs from cache).
|
||||
// To accommodate, we finish each call in its own go-routine,
|
||||
// which is expensive, but easy - if you want something better, use the socket protocol!
|
||||
// maybe one day, if people really want it, we use grpc streams,
|
||||
// but hopefully not :D
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
|
||||
req := types.ToRequestEcho(msg)
|
||||
res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
func (cli *grpcClient) FlushAsync() *ReqRes {
|
||||
req := types.ToRequestFlush()
|
||||
res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
|
||||
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
|
||||
req := types.ToRequestInfo(params)
|
||||
res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
|
||||
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
|
||||
req := types.ToRequestSetOption(params)
|
||||
res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
|
||||
req := types.ToRequestDeliverTx(params)
|
||||
res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
|
||||
func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
|
||||
req := types.ToRequestCheckTx(params)
|
||||
res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
|
||||
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
|
||||
req := types.ToRequestQuery(params)
|
||||
res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
func (cli *grpcClient) CommitAsync() *ReqRes {
|
||||
req := types.ToRequestCommit()
|
||||
res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
|
||||
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
|
||||
req := types.ToRequestInitChain(params)
|
||||
res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
|
||||
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
|
||||
req := types.ToRequestBeginBlock(params)
|
||||
res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
|
||||
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
|
||||
req := types.ToRequestEndBlock(params)
|
||||
res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
|
||||
func (cli *grpcClient) ListSnapshotsAsync(params types.RequestListSnapshots) *ReqRes {
|
||||
req := types.ToRequestListSnapshots(params)
|
||||
res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.ListSnapshots(context.Background(), req.GetListSnapshots(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
|
||||
func (cli *grpcClient) OfferSnapshotAsync(params types.RequestOfferSnapshot) *ReqRes {
|
||||
req := types.ToRequestOfferSnapshot(params)
|
||||
res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.OfferSnapshot(context.Background(), req.GetOfferSnapshot(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) LoadSnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
params types.RequestLoadSnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
func (cli *grpcClient) LoadSnapshotChunkAsync(params types.RequestLoadSnapshotChunk) *ReqRes {
|
||||
req := types.ToRequestLoadSnapshotChunk(params)
|
||||
res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.LoadSnapshotChunk(context.Background(), req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
|
||||
}
|
||||
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) ApplySnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
params types.RequestApplySnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshotChunk) *ReqRes {
|
||||
req := types.ToRequestApplySnapshotChunk(params)
|
||||
res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.ApplySnapshotChunk(context.Background(), req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(
|
||||
ctx,
|
||||
req,
|
||||
&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
|
||||
)
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
|
||||
}
|
||||
|
||||
// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
|
||||
// with the response. We don't complete it until it's been ordered via the channel.
|
||||
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
|
||||
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
|
||||
reqres := NewReqRes(req)
|
||||
reqres.Response = res
|
||||
select {
|
||||
case cli.chReqRes <- reqres: // use channel for async responses, since they must be ordered
|
||||
return reqres, nil
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
cli.chReqRes <- reqres // use channel for async responses, since they must be ordered
|
||||
return reqres
|
||||
}
|
||||
|
||||
// finishSyncCall waits for an async call to complete. It is necessary to call all
|
||||
@@ -357,150 +339,81 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *grpcClient) FlushSync(ctx context.Context) error {
|
||||
return nil
|
||||
func (cli *grpcClient) FlushSync() error {
|
||||
reqres := cli.FlushAsync()
|
||||
cli.finishSyncCall(reqres).GetFlush()
|
||||
return cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqres, err := cli.EchoAsync(ctx, msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
reqres := cli.EchoAsync(msg)
|
||||
// StopForError should already have been called if error is set
|
||||
return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InfoSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInfo,
|
||||
) (*types.ResponseInfo, error) {
|
||||
reqres, err := cli.InfoAsync(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
reqres := cli.InfoAsync(req)
|
||||
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) DeliverTxSync(
|
||||
ctx context.Context,
|
||||
params types.RequestDeliverTx,
|
||||
) (*types.ResponseDeliverTx, error) {
|
||||
func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
reqres := cli.SetOptionAsync(req)
|
||||
return reqres.Response.GetSetOption(), cli.Error()
|
||||
}
|
||||
|
||||
reqres, err := cli.DeliverTxAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
reqres := cli.DeliverTxAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CheckTxSync(
|
||||
ctx context.Context,
|
||||
params types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
|
||||
reqres, err := cli.CheckTxAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
reqres := cli.CheckTxAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
reqres, err := cli.QueryAsync(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
reqres := cli.QueryAsync(req)
|
||||
return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
reqres, err := cli.CommitAsync(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
reqres := cli.CommitAsync()
|
||||
return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InitChainSync(
|
||||
ctx context.Context,
|
||||
params types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
reqres, err := cli.InitChainAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
reqres := cli.InitChainAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) BeginBlockSync(
|
||||
ctx context.Context,
|
||||
params types.RequestBeginBlock,
|
||||
) (*types.ResponseBeginBlock, error) {
|
||||
|
||||
reqres, err := cli.BeginBlockAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
reqres := cli.BeginBlockAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) EndBlockSync(
|
||||
ctx context.Context,
|
||||
params types.RequestEndBlock,
|
||||
) (*types.ResponseEndBlock, error) {
|
||||
|
||||
reqres, err := cli.EndBlockAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
reqres := cli.EndBlockAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) ListSnapshotsSync(
|
||||
ctx context.Context,
|
||||
params types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
reqres, err := cli.ListSnapshotsAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) ListSnapshotsSync(params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
reqres := cli.ListSnapshotsAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OfferSnapshotSync(
|
||||
ctx context.Context,
|
||||
params types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
reqres, err := cli.OfferSnapshotAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) OfferSnapshotSync(params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
reqres := cli.OfferSnapshotAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) LoadSnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reqres := cli.LoadSnapshotChunkAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) ApplySnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
reqres := cli.ApplySnapshotChunkAsync(params)
|
||||
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
|
||||
}
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
types "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
var _ Client = (*localClient)(nil)
|
||||
|
||||
// NOTE: use defer to unlock mutex because Application might panic (e.g., in
|
||||
// case of malicious tx or query). It only makes sense for publicly exposed
|
||||
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
|
||||
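The note above is the reason every localClient method below takes the mutex
with `defer`; a minimal illustration of the pattern (hypothetical method name,
same shape as the real ones):

```go
// exampleQuery shows the defer-unlock pattern: the mutex is released even if
// the wrapped Application panics while serving the request.
func (app *localClient) exampleQuery(req types.RequestQuery) (*types.ResponseQuery, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.Query(req)
	return &res, nil
}
```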
@@ -49,22 +49,22 @@ func (app *localClient) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
func (app *localClient) FlushAsync() *ReqRes {
|
||||
// Do nothing
|
||||
return newLocalReqRes(types.ToRequestFlush(), nil), nil
|
||||
return newLocalReqRes(types.ToRequestFlush(), nil)
|
||||
}
|
||||
|
||||
func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
func (app *localClient) EchoAsync(msg string) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
return app.callback(
|
||||
types.ToRequestEcho(msg),
|
||||
types.ToResponseEcho(msg),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
|
||||
func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -72,10 +72,21 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
|
||||
return app.callback(
|
||||
types.ToRequestInfo(req),
|
||||
types.ToResponseInfo(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
|
||||
func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.SetOption(req)
|
||||
return app.callback(
|
||||
types.ToRequestSetOption(req),
|
||||
types.ToResponseSetOption(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -83,10 +94,10 @@ func (app *localClient) DeliverTxAsync(ctx context.Context, params types.Request
|
||||
return app.callback(
|
||||
types.ToRequestDeliverTx(params),
|
||||
types.ToResponseDeliverTx(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
||||
func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -94,10 +105,10 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
|
||||
return app.callback(
|
||||
types.ToRequestCheckTx(req),
|
||||
types.ToResponseCheckTx(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
|
||||
func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -105,10 +116,10 @@ func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery)
|
||||
return app.callback(
|
||||
types.ToRequestQuery(req),
|
||||
types.ToResponseQuery(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
func (app *localClient) CommitAsync() *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -116,10 +127,10 @@ func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return app.callback(
|
||||
types.ToRequestCommit(),
|
||||
types.ToResponseCommit(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
|
||||
func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -127,10 +138,10 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
|
||||
return app.callback(
|
||||
types.ToRequestInitChain(req),
|
||||
types.ToResponseInitChain(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
|
||||
func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -138,10 +149,10 @@ func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBe
|
||||
return app.callback(
|
||||
types.ToRequestBeginBlock(req),
|
||||
types.ToResponseBeginBlock(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
|
||||
func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -149,10 +160,10 @@ func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndB
|
||||
return app.callback(
|
||||
types.ToRequestEndBlock(req),
|
||||
types.ToResponseEndBlock(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
||||
func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -160,10 +171,10 @@ func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.Reques
|
||||
return app.callback(
|
||||
types.ToRequestListSnapshots(req),
|
||||
types.ToResponseListSnapshots(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
|
||||
func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -171,13 +182,10 @@ func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.Reques
|
||||
return app.callback(
|
||||
types.ToRequestOfferSnapshot(req),
|
||||
types.ToResponseOfferSnapshot(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) LoadSnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -185,13 +193,10 @@ func (app *localClient) LoadSnapshotChunkAsync(
|
||||
return app.callback(
|
||||
types.ToRequestLoadSnapshotChunk(req),
|
||||
types.ToResponseLoadSnapshotChunk(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) ApplySnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -199,20 +204,20 @@ func (app *localClient) ApplySnapshotChunkAsync(
|
||||
return app.callback(
|
||||
types.ToRequestApplySnapshotChunk(req),
|
||||
types.ToResponseApplySnapshotChunk(res),
|
||||
), nil
|
||||
)
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *localClient) FlushSync(ctx context.Context) error {
|
||||
func (app *localClient) FlushSync() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{Message: msg}, nil
|
||||
}
|
||||
|
||||
func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -220,11 +225,15 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) DeliverTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestDeliverTx,
|
||||
) (*types.ResponseDeliverTx, error) {
|
||||
func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.SetOption(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -232,10 +241,7 @@ func (app *localClient) DeliverTxSync(
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) CheckTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -243,10 +249,7 @@ func (app *localClient) CheckTxSync(
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -254,7 +257,7 @@ func (app *localClient) QuerySync(
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -262,11 +265,7 @@ func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit,
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) InitChainSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -274,11 +273,7 @@ func (app *localClient) InitChainSync(
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) BeginBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestBeginBlock,
|
||||
) (*types.ResponseBeginBlock, error) {
|
||||
|
||||
func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -286,11 +281,7 @@ func (app *localClient) BeginBlockSync(
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) EndBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestEndBlock,
|
||||
) (*types.ResponseEndBlock, error) {
|
||||
|
||||
func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -298,11 +289,7 @@ func (app *localClient) EndBlockSync(
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ListSnapshotsSync(
|
||||
ctx context.Context,
|
||||
req types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -310,11 +297,7 @@ func (app *localClient) ListSnapshotsSync(
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) OfferSnapshotSync(
|
||||
ctx context.Context,
|
||||
req types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -323,9 +306,7 @@ func (app *localClient) OfferSnapshotSync(
|
||||
}
|
||||
|
||||
func (app *localClient) LoadSnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -334,9 +315,7 @@ func (app *localClient) LoadSnapshotChunkSync(
|
||||
}
|
||||
|
||||
func (app *localClient) ApplySnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -348,12 +327,13 @@ func (app *localClient) ApplySnapshotChunkSync(
|
||||
|
||||
func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
|
||||
app.Callback(req, res)
|
||||
return newLocalReqRes(req, res)
|
||||
rr := newLocalReqRes(req, res)
|
||||
rr.callbackInvoked = true
|
||||
return rr
|
||||
}
|
||||
|
||||
func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
|
||||
reqRes := NewReqRes(req)
|
||||
reqRes.Response = res
|
||||
reqRes.SetDone()
|
||||
return reqRes
|
||||
}
|
||||
|
||||
@@ -1,12 +1,9 @@
|
||||
// Code generated by mockery v2.3.0. DO NOT EDIT.
|
||||
// Code generated by mockery v1.1.1. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
|
||||
log "github.com/tendermint/tendermint/libs/log"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
@@ -19,36 +16,29 @@ type Client struct {
|
||||
mock.Mock
|
||||
}
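A hypothetical test fragment using this generated mock (standard testify
conventions; the import path for the mocks package is assumed):

```go
import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/abci/types"
)

func TestCheckTxSyncMock(t *testing.T) {
	m := new(mocks.Client)
	m.On("CheckTxSync", mock.Anything).
		Return(&types.ResponseCheckTx{Code: 0}, nil)

	res, err := m.CheckTxSync(types.RequestCheckTx{Tx: []byte("tx")})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
	m.AssertExpectations(t)
}
```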
|
||||
|
||||
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ApplySnapshotChunkAsync(_a0 types.RequestApplySnapshotChunk) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// ApplySnapshotChunkSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseApplySnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
|
||||
@@ -56,8 +46,8 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -65,36 +55,29 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// BeginBlockAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) BeginBlockAsync(_a0 types.RequestBeginBlock) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// BeginBlockSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// BeginBlockSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseBeginBlock
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseBeginBlock)
|
||||
@@ -102,8 +85,8 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -111,36 +94,29 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CheckTxAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// CheckTxAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// CheckTxSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// CheckTxSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseCheckTx
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseCheckTx)
|
||||
@@ -148,30 +124,7 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CommitAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -180,13 +133,29 @@ func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CommitSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// CommitAsync provides a mock function with given fields:
|
||||
func (_m *Client) CommitAsync() *abcicli.ReqRes {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// CommitSync provides a mock function with given fields:
|
||||
func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 *types.ResponseCommit
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseCommit)
|
||||
@@ -194,7 +163,46 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// DeliverTxAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// DeliverTxSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseDeliverTx
|
||||
if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseDeliverTx)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestDeliverTx) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -203,82 +211,29 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// EchoAsync provides a mock function with given fields: msg
|
||||
func (_m *Client) EchoAsync(msg string) *abcicli.ReqRes {
|
||||
ret := _m.Called(msg)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(string) *abcicli.ReqRes); ok {
|
||||
r0 = rf(msg)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// DeliverTxSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseDeliverTx
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseDeliverTx)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// EchoAsync provides a mock function with given fields: ctx, msg
|
||||
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(ctx, msg)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok {
|
||||
r0 = rf(ctx, msg)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, msg)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// EchoSync provides a mock function with given fields: ctx, msg
|
||||
func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
ret := _m.Called(ctx, msg)
|
||||
// EchoSync provides a mock function with given fields: msg
|
||||
func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
ret := _m.Called(msg)
|
||||
|
||||
var r0 *types.ResponseEcho
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
|
||||
r0 = rf(ctx, msg)
|
||||
if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok {
|
||||
r0 = rf(msg)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseEcho)
|
||||
@@ -286,8 +241,8 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, msg)
|
||||
if rf, ok := ret.Get(1).(func(string) error); ok {
|
||||
r1 = rf(msg)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -295,36 +250,29 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// EndBlockAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// EndBlockAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) EndBlockAsync(_a0 types.RequestEndBlock) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// EndBlockSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// EndBlockSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseEndBlock
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseEndBlock)
|
||||
@@ -332,8 +280,8 @@ func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -355,12 +303,42 @@ func (_m *Client) Error() error {
|
||||
return r0
|
||||
}
|
||||
|
||||
// FlushAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
|
||||
// FlushAsync provides a mock function with given fields:
|
||||
func (_m *Client) FlushAsync() *abcicli.ReqRes {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// FlushSync provides a mock function with given fields:
|
||||
func (_m *Client) FlushSync() error {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func() error); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// InfoAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) InfoAsync(_a0 types.RequestInfo) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInfo) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -368,8 +346,24 @@ func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// InfoSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInfo)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -378,96 +372,29 @@ func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// FlushSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) FlushSync(_a0 context.Context) error {
|
||||
// InitChainAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) InitChainAsync(_a0 types.RequestInitChain) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInitChain) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// InfoAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InfoSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInfo)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChainAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChainSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// InitChainSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseInitChain
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInitChain)
|
||||
@@ -475,8 +402,8 @@ func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain)
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -498,36 +425,29 @@ func (_m *Client) IsRunning() bool {
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// ListSnapshotsAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ListSnapshotsAsync(_a0 types.RequestListSnapshots) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListSnapshotsSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// ListSnapshotsSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseListSnapshots
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseListSnapshots)
|
||||
@@ -535,8 +455,8 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -544,36 +464,29 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) LoadSnapshotChunkAsync(_a0 types.RequestLoadSnapshotChunk) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// LoadSnapshotChunkSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseLoadSnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
|
||||
@@ -581,8 +494,8 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -590,36 +503,29 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// OfferSnapshotAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) OfferSnapshotAsync(_a0 types.RequestOfferSnapshot) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// OfferSnapshotSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// OfferSnapshotSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseOfferSnapshot
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
|
||||
@@ -627,8 +533,8 @@ func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferS
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -669,36 +575,29 @@ func (_m *Client) OnStop() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
// QueryAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// QueryAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestQuery) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
return r0
|
||||
}
|
||||
|
||||
// QuerySync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
// QuerySync provides a mock function with given fields: _a0
|
||||
func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseQuery
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseQuery)
|
||||
@@ -706,8 +605,8 @@ func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -750,6 +649,45 @@ func (_m *Client) SetLogger(_a0 log.Logger) {
|
||||
_m.Called(_a0)
|
||||
}
|
||||
|
||||
// SetOptionAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) SetOptionAsync(_a0 types.RequestSetOption) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestSetOption) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// SetOptionSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) SetOptionSync(_a0 types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseSetOption
|
||||
if rf, ok := ret.Get(0).(func(types.RequestSetOption) *types.ResponseSetOption); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseSetOption)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestSetOption) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// SetResponseCallback provides a mock function with given fields: _a0
|
||||
func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
|
||||
_m.Called(_a0)
|
||||
|
||||
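For context on the regenerated mock above, here is a minimal, hypothetical test sketch of how the context-free mock signatures are typically programmed and exercised. It assumes the generated mocks package is imported from github.com/tendermint/tendermint/abci/client/mocks and that testify is available; the test name and expectations are illustrative only, not part of this change.

package example_test

import (
	"testing"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/abci/types"
)

// Sketch: program the regenerated mock (context-free signatures) and
// assert that the expected calls were made.
func TestMockClientSketch(t *testing.T) {
	m := new(mocks.Client)

	// CheckTxSync now takes only the request and returns (*ResponseCheckTx, error).
	m.On("CheckTxSync", types.RequestCheckTx{Tx: []byte("tx")}).
		Return(&types.ResponseCheckTx{Code: 0}, nil)
	// CommitSync takes no arguments after the revert.
	m.On("CommitSync").Return(&types.ResponseCommit{}, nil)

	res, err := m.CheckTxSync(types.RequestCheckTx{Tx: []byte("tx")})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)

	_, err = m.CommitSync()
	require.NoError(t, err)

	m.AssertExpectations(t)
}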
@@ -3,7 +3,6 @@ package abcicli
import (
	"bufio"
	"container/list"
	"context"
	"errors"
	"fmt"
	"io"
@@ -19,18 +18,10 @@ import (
)

const (
	// reqQueueSize is the max number of queued async requests.
	// (memory: 256MB max assuming 1MB transactions)
	reqQueueSize = 256
	// Don't wait longer than...
	flushThrottleMS = 20
	reqQueueSize = 256 // TODO make configurable
	flushThrottleMS = 20 // Don't wait longer than...
)

type reqResWithContext struct {
	R *ReqRes
	C context.Context // if context.Err is not nil, reqRes will be thrown away (ignored)
}
|
||||
|
||||
// This is goroutine-safe, but users should beware that the application in
|
||||
// general is not meant to be interfaced with concurrent callers.
|
||||
type socketClient struct {
|
||||
@@ -40,7 +31,7 @@ type socketClient struct {
|
||||
mustConnect bool
|
||||
conn net.Conn
|
||||
|
||||
reqQueue chan *reqResWithContext
|
||||
reqQueue chan *ReqRes
|
||||
flushTimer *timer.ThrottleTimer
|
||||
|
||||
mtx tmsync.Mutex
|
||||
@@ -56,7 +47,7 @@ var _ Client = (*socketClient)(nil)
|
||||
// if it fails to connect.
|
||||
func NewSocketClient(addr string, mustConnect bool) Client {
|
||||
cli := &socketClient{
|
||||
reqQueue: make(chan *reqResWithContext, reqQueueSize),
|
||||
reqQueue: make(chan *ReqRes, reqQueueSize),
|
||||
flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS),
|
||||
mustConnect: mustConnect,
|
||||
|
||||
@@ -132,20 +123,15 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
|
||||
case reqres := <-cli.reqQueue:
|
||||
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
|
||||
|
||||
if reqres.C.Err() != nil {
|
||||
cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
|
||||
continue
|
||||
}
|
||||
|
||||
cli.willSendReq(reqres.R)
|
||||
err := types.WriteMessage(reqres.R.Request, w)
|
||||
cli.willSendReq(reqres)
|
||||
err := types.WriteMessage(reqres.Request, w)
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
// If it's a flush request, flush the current buffer.
|
||||
if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
|
||||
if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
|
||||
err = w.Flush()
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
|
||||
@@ -154,7 +140,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
|
||||
}
|
||||
case <-cli.flushTimer.Ch: // flush queue
|
||||
select {
|
||||
case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
|
||||
case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
|
||||
default:
|
||||
// Probably will fill the buffer, or retry later.
|
||||
}
|
||||
@@ -226,273 +212,218 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
	//
	// NOTE: It is possible this callback isn't set on the reqres object at this
	// point, in which case it will be called later, when it is set.
	if cb := reqres.GetCallback(); cb != nil {
		cb(res)
	}
	reqres.InvokeCallback()

	return nil
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
|
||||
func (cli *socketClient) EchoAsync(msg string) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestEcho(msg))
|
||||
}
|
||||
|
||||
func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestFlush())
|
||||
func (cli *socketClient) FlushAsync() *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestFlush())
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
|
||||
func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestInfo(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
|
||||
func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestSetOption(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
|
||||
func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestDeliverTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
|
||||
func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestCheckTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestCommit())
|
||||
func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestQuery(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
|
||||
func (cli *socketClient) CommitAsync() *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestCommit())
|
||||
}
|
||||
|
||||
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
|
||||
func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestInitChain(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
|
||||
func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestBeginBlock(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
|
||||
func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestEndBlock(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
|
||||
func (cli *socketClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestListSnapshots(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
func (cli *socketClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestOfferSnapshot(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
func (cli *socketClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) FlushSync(ctx context.Context) error {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
|
||||
if err != nil {
|
||||
return queueErr(err)
|
||||
}
|
||||
|
||||
func (cli *socketClient) FlushSync() error {
|
||||
reqRes := cli.queueRequest(types.ToRequestFlush())
|
||||
if err := cli.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
gotResp := make(chan struct{})
|
||||
go func() {
|
||||
// NOTE: if we don't flush the queue, it's possible to get stuck here
|
||||
reqRes.Wait()
|
||||
close(gotResp)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-gotResp:
|
||||
return cli.Error()
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
reqRes.Wait() // NOTE: if we don't flush the queue, it's possible to get stuck here
|
||||
return cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestEcho(msg))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetEcho(), nil
|
||||
|
||||
return reqres.Response.GetEcho(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInfo,
|
||||
) (*types.ResponseInfo, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestInfo(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetInfo(), nil
|
||||
|
||||
return reqres.Response.GetInfo(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) DeliverTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestDeliverTx,
|
||||
) (*types.ResponseDeliverTx, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestSetOption(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetDeliverTx(), nil
|
||||
|
||||
return reqres.Response.GetSetOption(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetCheckTx(), nil
|
||||
|
||||
return reqres.Response.GetDeliverTx(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestCheckTx(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetQuery(), nil
|
||||
|
||||
return reqres.Response.GetCheckTx(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
|
||||
if err != nil {
|
||||
func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestQuery(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetCommit(), nil
|
||||
|
||||
return reqres.Response.GetQuery(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestCommit())
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetInitChain(), nil
|
||||
|
||||
return reqres.Response.GetCommit(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) BeginBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestBeginBlock,
|
||||
) (*types.ResponseBeginBlock, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestInitChain(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetBeginBlock(), nil
|
||||
|
||||
return reqres.Response.GetInitChain(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) EndBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestEndBlock,
|
||||
) (*types.ResponseEndBlock, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetEndBlock(), nil
|
||||
|
||||
return reqres.Response.GetBeginBlock(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshotsSync(
|
||||
ctx context.Context,
|
||||
req types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestEndBlock(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetListSnapshots(), nil
|
||||
|
||||
return reqres.Response.GetEndBlock(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotSync(
|
||||
ctx context.Context,
|
||||
req types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
|
||||
if err != nil {
|
||||
func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestListSnapshots(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetOfferSnapshot(), nil
|
||||
|
||||
return reqres.Response.GetListSnapshots(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestOfferSnapshot(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetOfferSnapshot(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
if err != nil {
|
||||
reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetLoadSnapshotChunk(), nil
|
||||
|
||||
return reqres.Response.GetLoadSnapshotChunk(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
if err != nil {
|
||||
reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetApplySnapshotChunk(), nil
|
||||
return reqres.Response.GetApplySnapshotChunk(), cli.Error()
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
// queueRequest enqueues req onto the queue. If the queue is full, it either
|
||||
// returns an error (sync=false) or blocks (sync=true).
|
||||
//
|
||||
// When sync=true, ctx can be used to break early. When sync=false, ctx will be
|
||||
// used later to determine if the request should be dropped (if ctx.Err is
|
||||
// non-nil).
|
||||
//
|
||||
// The caller is responsible for checking cli.Error.
|
||||
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) {
|
||||
func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
|
||||
reqres := NewReqRes(req)
|
||||
|
||||
if sync {
|
||||
select {
|
||||
case cli.reqQueue <- &reqResWithContext{R: reqres, C: context.Background()}:
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case cli.reqQueue <- &reqResWithContext{R: reqres, C: ctx}:
|
||||
default:
|
||||
return nil, errors.New("buffer is full")
|
||||
}
|
||||
}
|
||||
// TODO: set cli.err if reqQueue times out
|
||||
cli.reqQueue <- reqres
|
||||
|
||||
// Maybe auto-flush, or unset auto-flush
|
||||
switch req.Value.(type) {
|
||||
@@ -502,41 +433,7 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s
|
||||
cli.flushTimer.Set()
|
||||
}
|
||||
|
||||
return reqres, nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) queueRequestAsync(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, false)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) queueRequestAndFlushSync(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, true)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
if err := cli.FlushSync(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func queueErr(e error) error {
|
||||
return fmt.Errorf("can't queue req: %w", e)
|
||||
return reqres
|
||||
}
|
||||
|
||||
func (cli *socketClient) flushQueue() {
|
||||
@@ -554,7 +451,7 @@ LOOP:
|
||||
for {
|
||||
select {
|
||||
case reqres := <-cli.reqQueue:
|
||||
reqres.R.Done()
|
||||
reqres.Done()
|
||||
default:
|
||||
break LOOP
|
||||
}
|
||||
@@ -571,6 +468,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
||||
_, ok = res.Value.(*types.Response_Flush)
|
||||
case *types.Request_Info:
|
||||
_, ok = res.Value.(*types.Response_Info)
|
||||
case *types.Request_SetOption:
|
||||
_, ok = res.Value.(*types.Response_SetOption)
|
||||
case *types.Request_DeliverTx:
|
||||
_, ok = res.Value.(*types.Response_DeliverTx)
|
||||
case *types.Request_CheckTx:
|
||||
@@ -603,10 +502,12 @@ func (cli *socketClient) stopForError(err error) {
|
||||
}
|
||||
|
||||
cli.mtx.Lock()
|
||||
cli.err = err
|
||||
if cli.err == nil {
|
||||
cli.err = err
|
||||
}
|
||||
cli.mtx.Unlock()
|
||||
|
||||
cli.Logger.Info("Stopping abci.socketClient", "reason", err)
|
||||
cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
|
||||
if err := cli.Stop(); err != nil {
|
||||
cli.Logger.Error("Error stopping abci.socketClient", "err", err)
|
||||
}
|
||||
|
||||
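To illustrate the reverted surface of the socket client above, here is a minimal, hypothetical usage sketch of the context-free sync API. The address, version string, and error handling are placeholders, and a running ABCI socket server is assumed on the other end; only NewSocketClient, EchoSync, and InfoSync signatures come from the diff above.

package main

import (
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Hypothetical address; mustConnect=true makes Start fail fast if the server is down.
	client := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := client.Start(); err != nil {
		log.Fatalf("start client: %v", err)
	}
	defer func() { _ = client.Stop() }()

	// Sync calls no longer take a context; flushing is handled internally.
	if _, err := client.EchoSync("hello"); err != nil {
		log.Fatalf("echo: %v", err)
	}
	res, err := client.InfoSync(types.RequestInfo{Version: "0.34.x"})
	if err != nil {
		log.Fatalf("info: %v", err)
	}
	log.Printf("app version: %s", res.GetVersion())
}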
@@ -1,8 +1,8 @@
|
||||
package abcicli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -16,8 +16,6 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func TestProperSyncCalls(t *testing.T) {
|
||||
app := slowApp{}
|
||||
|
||||
@@ -36,12 +34,11 @@ func TestProperSyncCalls(t *testing.T) {
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
// This is BeginBlockSync unrolled....
|
||||
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
|
||||
assert.NoError(t, err)
|
||||
err = c.FlushSync(context.Background())
|
||||
assert.NoError(t, err)
|
||||
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
|
||||
err := c.FlushSync()
|
||||
require.NoError(t, err)
|
||||
res := reqres.Response.GetBeginBlock()
|
||||
assert.NotNil(t, res)
|
||||
require.NotNil(t, res)
|
||||
resp <- c.Error()
|
||||
}()
|
||||
|
||||
@@ -72,16 +69,14 @@ func TestHangingSyncCalls(t *testing.T) {
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
// Start BeginBlock and flush it
|
||||
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
|
||||
assert.NoError(t, err)
|
||||
flush, err := c.FlushAsync(ctx)
|
||||
assert.NoError(t, err)
|
||||
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
|
||||
flush := c.FlushAsync()
|
||||
// wait 20 ms for all events to travel socket, but
|
||||
// no response yet from server
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
// kill the server, so the connections break
|
||||
err = s.Stop()
|
||||
assert.NoError(t, err)
|
||||
err := s.Stop()
|
||||
require.NoError(t, err)
|
||||
|
||||
// wait for the response from BeginBlock
|
||||
reqres.Wait()
|
||||
@@ -124,3 +119,71 @@ func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock
|
||||
time.Sleep(200 * time.Millisecond)
|
||||
return types.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
// set after the client completes the call into the app. Currently this
// test relies on the callback being allowed to be invoked twice if set multiple
// times, once when set early and once when set late.
func TestCallbackInvokedWhenSetLate(t *testing.T) {
	wg := &sync.WaitGroup{}
	wg.Add(1)
	app := blockedABCIApplication{
		wg: wg,
	}
	_, c := setupClientServer(t, app)
	reqRes := c.CheckTxAsync(types.RequestCheckTx{})

	done := make(chan struct{})
	cb := func(_ *types.Response) {
		close(done)
	}
	reqRes.SetCallback(cb)
	app.wg.Done()
	<-done

	var called bool
	cb = func(_ *types.Response) {
		called = true
	}
	reqRes.SetCallback(cb)
	require.True(t, called)
}

type blockedABCIApplication struct {
	wg *sync.WaitGroup
	types.BaseApplication
}

func (b blockedABCIApplication) CheckTx(r types.RequestCheckTx) types.ResponseCheckTx {
	b.wg.Wait()
	return b.BaseApplication.CheckTx(r)
}

// TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
// set before the client completes the call into the app.
func TestCallbackInvokedWhenSetEarly(t *testing.T) {
	wg := &sync.WaitGroup{}
	wg.Add(1)
	app := blockedABCIApplication{
		wg: wg,
	}
	_, c := setupClientServer(t, app)
	reqRes := c.CheckTxAsync(types.RequestCheckTx{})

	done := make(chan struct{})
	cb := func(_ *types.Response) {
		close(done)
	}
	reqRes.SetCallback(cb)
	app.wg.Done()

	called := func() bool {
		select {
		case <-done:
			return true
		default:
			return false
		}
	}
	require.Eventually(t, called, time.Second, time.Millisecond*25)
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -17,7 +16,6 @@ import (
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/example/counter"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
"github.com/tendermint/tendermint/abci/server"
|
||||
servertest "github.com/tendermint/tendermint/abci/tests/server"
|
||||
@@ -30,8 +28,6 @@ import (
|
||||
var (
|
||||
client abcicli.Client
|
||||
logger log.Logger
|
||||
|
||||
ctx = context.Background()
|
||||
)
|
||||
|
||||
// flags
|
||||
@@ -47,9 +43,6 @@ var (
|
||||
flagHeight int
|
||||
flagProve bool
|
||||
|
||||
// counter
|
||||
flagSerial bool
|
||||
|
||||
// kvstore
|
||||
flagPersist string
|
||||
)
|
||||
@@ -61,9 +54,7 @@ var RootCmd = &cobra.Command{
|
||||
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
|
||||
|
||||
switch cmd.Use {
|
||||
case "counter", "kvstore": // for the examples apps, don't pre-run
|
||||
return nil
|
||||
case "version": // skip running for version command
|
||||
case "kvstore", "version":
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -138,10 +129,6 @@ func addQueryFlags() {
|
||||
"whether or not to return a merkle proof of the query result")
|
||||
}
|
||||
|
||||
func addCounterFlags() {
|
||||
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
|
||||
}
|
||||
|
||||
func addKVStoreFlags() {
|
||||
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
|
||||
}
|
||||
@@ -151,6 +138,7 @@ func addCommands() {
|
||||
RootCmd.AddCommand(consoleCmd)
|
||||
RootCmd.AddCommand(echoCmd)
|
||||
RootCmd.AddCommand(infoCmd)
|
||||
RootCmd.AddCommand(setOptionCmd)
|
||||
RootCmd.AddCommand(deliverTxCmd)
|
||||
RootCmd.AddCommand(checkTxCmd)
|
||||
RootCmd.AddCommand(commitCmd)
|
||||
@@ -160,8 +148,6 @@ func addCommands() {
|
||||
RootCmd.AddCommand(queryCmd)
|
||||
|
||||
// examples
|
||||
addCounterFlags()
|
||||
RootCmd.AddCommand(counterCmd)
|
||||
addKVStoreFlags()
|
||||
RootCmd.AddCommand(kvstoreCmd)
|
||||
}
|
||||
@@ -178,6 +164,7 @@ you'd like to run:
|
||||
|
||||
where example.file looks something like:
|
||||
|
||||
set_option serial on
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
deliver_tx 0x00
|
||||
@@ -199,7 +186,7 @@ This command opens an interactive console for running any of the other commands
|
||||
without opening a new connection each time
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
|
||||
ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
|
||||
RunE: cmdConsole,
|
||||
}
|
||||
|
||||
@@ -217,6 +204,13 @@ var infoCmd = &cobra.Command{
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: cmdInfo,
|
||||
}
|
||||
var setOptionCmd = &cobra.Command{
|
||||
Use: "set_option",
|
||||
Short: "set an option on the application",
|
||||
Long: "set an option on the application",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: cmdSetOption,
|
||||
}
|
||||
|
||||
var deliverTxCmd = &cobra.Command{
|
||||
Use: "deliver_tx",
|
||||
@@ -261,14 +255,6 @@ var queryCmd = &cobra.Command{
|
||||
RunE: cmdQuery,
|
||||
}
|
||||
|
||||
var counterCmd = &cobra.Command{
|
||||
Use: "counter",
|
||||
Short: "ABCI demo example",
|
||||
Long: "ABCI demo example",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: cmdCounter,
|
||||
}
|
||||
|
||||
var kvstoreCmd = &cobra.Command{
|
||||
Use: "kvstore",
|
||||
Short: "ABCI demo example",
|
||||
@@ -318,6 +304,7 @@ func cmdTest(cmd *cobra.Command, args []string) error {
|
||||
return compose(
|
||||
[]func() error{
|
||||
func() error { return servertest.InitChain(client) },
|
||||
func() error { return servertest.SetOption(client, "serial", "on") },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
@@ -432,6 +419,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
return cmdInfo(cmd, actualArgs)
|
||||
case "query":
|
||||
return cmdQuery(cmd, actualArgs)
|
||||
case "set_option":
|
||||
return cmdSetOption(cmd, actualArgs)
|
||||
default:
|
||||
return cmdUnimplemented(cmd, pArgs)
|
||||
}
|
||||
@@ -455,6 +444,7 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
|
||||
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
|
||||
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
|
||||
fmt.Println("Use \"[command] --help\" for more information about a command.")
|
||||
|
||||
return nil
|
||||
@@ -466,7 +456,7 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
msg = args[0]
|
||||
}
|
||||
res, err := client.EchoSync(ctx, msg)
|
||||
res, err := client.EchoSync(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -482,7 +472,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 1 {
|
||||
version = args[0]
|
||||
}
|
||||
res, err := client.InfoSync(ctx, types.RequestInfo{Version: version})
|
||||
res, err := client.InfoSync(types.RequestInfo{Version: version})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -494,6 +484,25 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
|
||||
const codeBad uint32 = 10
|
||||
|
||||
// Set an option on the application
|
||||
func cmdSetOption(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 2 {
|
||||
printResponse(cmd, args, response{
|
||||
Code: codeBad,
|
||||
Log: "want at least arguments of the form: <key> <value>",
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
key, val := args[0], args[1]
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append a new tx to application
|
||||
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
@@ -507,7 +516,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -533,7 +542,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -548,7 +557,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Get application Merkle root hash
|
||||
func cmdCommit(cmd *cobra.Command, args []string) error {
|
||||
res, err := client.CommitSync(ctx)
|
||||
res, err := client.CommitSync()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -573,7 +582,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
resQuery, err := client.QuerySync(ctx, types.RequestQuery{
|
||||
resQuery, err := client.QuerySync(types.RequestQuery{
|
||||
Data: queryBytes,
|
||||
Path: flagPath,
|
||||
Height: int64(flagHeight),
|
||||
@@ -596,32 +605,6 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func cmdCounter(cmd *cobra.Command, args []string) error {
|
||||
app := counter.NewApplication(flagSerial)
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
// Start the listener
|
||||
srv, err := server.NewServer(flagAddress, flagAbci, app)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv.SetLogger(logger.With("module", "abci-server"))
|
||||
if err := srv.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
if err := srv.Stop(); err != nil {
|
||||
logger.Error("Error while stopping server", "err", err)
|
||||
}
|
||||
})
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
}
|
||||
|
||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
|
||||
@@ -1,86 +0,0 @@
|
||||
package counter
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
type Application struct {
|
||||
types.BaseApplication
|
||||
|
||||
hashCount int
|
||||
txCount int
|
||||
serial bool
|
||||
}
|
||||
|
||||
func NewApplication(serial bool) *Application {
|
||||
return &Application{serial: serial}
|
||||
}
|
||||
|
||||
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
|
||||
}
|
||||
|
||||
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
if app.serial {
|
||||
if len(req.Tx) > 8 {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
||||
}
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
if txValue != uint64(app.txCount) {
|
||||
return types.ResponseDeliverTx{
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
|
||||
}
|
||||
}
|
||||
app.txCount++
|
||||
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
|
||||
if app.serial {
|
||||
if len(req.Tx) > 8 {
|
||||
return types.ResponseCheckTx{
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
||||
}
|
||||
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
if txValue < uint64(app.txCount) {
|
||||
return types.ResponseCheckTx{
|
||||
Code: code.CodeTypeBadNonce,
|
||||
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
|
||||
}
|
||||
}
|
||||
return types.ResponseCheckTx{Code: code.CodeTypeOK}
|
||||
}
|
||||
|
||||
func (app *Application) Commit() (resp types.ResponseCommit) {
|
||||
app.hashCount++
|
||||
if app.txCount == 0 {
|
||||
return types.ResponseCommit{}
|
||||
}
|
||||
hash := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
|
||||
return types.ResponseCommit{Data: hash}
|
||||
}
|
||||
|
||||
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
|
||||
switch reqQuery.Path {
|
||||
case "hash":
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
|
||||
case "tx":
|
||||
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
|
||||
default:
|
||||
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
|
||||
}
|
||||
}
|
||||
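The serial check in the counter app above treats each transaction as an up-to-8-byte big-endian nonce that must match the current txCount. As a rough sketch (not part of the diff; the helper name is made up and it assumes "encoding/binary" is imported), a client could produce the next valid transaction like this:

// nextTx encodes txCount as an 8-byte big-endian nonce, which is what the
// counter app's serial DeliverTx check above expects. Illustrative helper only.
func nextTx(txCount int) []byte {
	tx := make([]byte, 8)
	binary.BigEndian.PutUint64(tx, uint64(txCount))
	return tx
}
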
@@ -1,7 +1,6 @@
|
||||
package example
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
@@ -14,6 +13,8 @@ import (
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
|
||||
@@ -44,7 +45,7 @@ func TestGRPC(t *testing.T) {
|
||||
}
|
||||
|
||||
func testStream(t *testing.T, app types.Application) {
|
||||
const numDeliverTxs = 20000
|
||||
numDeliverTxs := 20000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
@@ -52,8 +53,9 @@ func testStream(t *testing.T, app types.Application) {
|
||||
// Start the listener
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
err := server.Start()
|
||||
require.NoError(t, err)
|
||||
if err := server.Start(); err != nil {
|
||||
require.NoError(t, err, "Error starting socket server")
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
@@ -63,8 +65,9 @@ func testStream(t *testing.T, app types.Application) {
|
||||
// Connect to the socket
|
||||
client := abcicli.NewSocketClient(socket, false)
|
||||
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
|
||||
err = client.Start()
|
||||
require.NoError(t, err)
|
||||
if err := client.Start(); err != nil {
|
||||
t.Fatalf("Error starting socket client: %v", err.Error())
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
@@ -98,24 +101,22 @@ func testStream(t *testing.T, app types.Application) {
|
||||
}
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Write requests
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
// Send request
|
||||
_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
|
||||
require.NoError(t, err)
|
||||
reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")})
|
||||
_ = reqRes
|
||||
// check err ?
|
||||
|
||||
// Sometimes send flush messages
|
||||
if counter%128 == 0 {
|
||||
err = client.FlushSync(context.Background())
|
||||
require.NoError(t, err)
|
||||
if counter%123 == 0 {
|
||||
client.FlushAsync()
|
||||
// check err ?
|
||||
}
|
||||
}
|
||||
|
||||
// Send final flush message
|
||||
_, err = client.FlushAsync(ctx)
|
||||
require.NoError(t, err)
|
||||
client.FlushAsync()
|
||||
|
||||
<-done
|
||||
}
|
||||
@@ -129,7 +130,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
|
||||
func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
|
||||
numDeliverTxs := 2000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
|
||||
@@ -147,6 +148,7 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
|
||||
})
|
||||
|
||||
// Connect to the socket
|
||||
//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
|
||||
conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
if err != nil {
|
||||
t.Fatalf("Error dialing GRPC server: %v", err.Error())
|
||||
|
||||
@@ -1,9 +1,8 @@
|
||||
package kvstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
@@ -24,8 +23,6 @@ const (
|
||||
testValue = "def"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := types.RequestDeliverTx{Tx: tx}
|
||||
ar := app.DeliverTx(req)
|
||||
@@ -74,7 +71,7 @@ func TestKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreKV(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -90,7 +87,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -122,7 +119,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
|
||||
|
||||
// add a validator, remove a validator, update a validator
|
||||
func TestValUpdates(t *testing.T) {
|
||||
dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
|
||||
dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
@@ -296,7 +293,7 @@ func TestClientServer(t *testing.T) {
|
||||
|
||||
// set up grpc app
|
||||
kvstore = NewApplication()
|
||||
gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
|
||||
gclient, gserver, err := makeGRPCClientServer(kvstore, "/tmp/kvstore-grpc")
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() {
|
||||
@@ -326,23 +323,23 @@ func runClientTests(t *testing.T, client abcicli.Client) {
|
||||
}
|
||||
|
||||
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
|
||||
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||
ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
|
||||
require.NoError(t, err)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
// repeating tx doesn't raise error
|
||||
ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||
ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
|
||||
require.NoError(t, err)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
// commit
|
||||
_, err = app.CommitSync(ctx)
|
||||
_, err = app.CommitSync()
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := app.InfoSync(ctx, types.RequestInfo{})
|
||||
info, err := app.InfoSync(types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
require.NotZero(t, info.LastBlockHeight)
|
||||
|
||||
// make sure query is fine
|
||||
resQuery, err := app.QuerySync(ctx, types.RequestQuery{
|
||||
resQuery, err := app.QuerySync(types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
})
|
||||
@@ -353,7 +350,7 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string)
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
|
||||
// make sure proof is fine
|
||||
resQuery, err = app.QuerySync(ctx, types.RequestQuery{
|
||||
resQuery, err = app.QuerySync(types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
Prove: true,
|
||||
|
||||
@@ -62,6 +62,10 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
|
||||
return res
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
|
||||
return app.app.SetOption(req)
|
||||
}
|
||||
|
||||
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
// if it starts with "val:", update the validator set
|
||||
|
||||
@@ -200,6 +200,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
|
||||
case *types.Request_Info:
|
||||
res := s.app.Info(*r.Info)
|
||||
responses <- types.ToResponseInfo(res)
|
||||
case *types.Request_SetOption:
|
||||
res := s.app.SetOption(*r.SetOption)
|
||||
responses <- types.ToResponseSetOption(res)
|
||||
case *types.Request_DeliverTx:
|
||||
res := s.app.DeliverTx(*r.DeliverTx)
|
||||
responses <- types.ToResponseDeliverTx(res)
|
||||
|
||||
@@ -19,9 +19,12 @@ func TestClientServerNoAddrPrefix(t *testing.T) {
|
||||
assert.NoError(t, err, "expected no error on NewServer")
|
||||
err = server.Start()
|
||||
assert.NoError(t, err, "expected no error on server.Start")
|
||||
defer func() { _ = server.Stop() }()
|
||||
|
||||
client, err := abciclient.NewClient(addr, transport, true)
|
||||
assert.NoError(t, err, "expected no error on NewClient")
|
||||
err = client.Start()
|
||||
assert.NoError(t, err, "expected no error on client.Start")
|
||||
|
||||
_ = client.Stop()
|
||||
}
|
||||
|
||||
@@ -2,7 +2,6 @@ package testsuite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
@@ -11,8 +10,6 @@ import (
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func InitChain(client abcicli.Client) error {
|
||||
total := 10
|
||||
vals := make([]types.ValidatorUpdate, total)
|
||||
@@ -21,7 +18,7 @@ func InitChain(client abcicli.Client) error {
|
||||
power := tmrand.Int()
|
||||
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
|
||||
}
|
||||
_, err := client.InitChainSync(ctx, types.RequestInitChain{
|
||||
_, err := client.InitChainSync(types.RequestInitChain{
|
||||
Validators: vals,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -32,8 +29,19 @@ func InitChain(client abcicli.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetOption(client abcicli.Client, key, value string) error {
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
|
||||
if err != nil {
|
||||
fmt.Println("Failed test: SetOption")
|
||||
fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err)
|
||||
return err
|
||||
}
|
||||
fmt.Println("Passed test: SetOption")
|
||||
return nil
|
||||
}
|
||||
|
||||
func Commit(client abcicli.Client, hashExp []byte) error {
|
||||
res, err := client.CommitSync(ctx)
|
||||
res, err := client.CommitSync()
|
||||
data := res.Data
|
||||
if err != nil {
|
||||
fmt.Println("Failed test: Commit")
|
||||
@@ -50,7 +58,7 @@ func Commit(client abcicli.Client, hashExp []byte) error {
|
||||
}
|
||||
|
||||
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: DeliverTx")
|
||||
@@ -69,7 +77,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
|
||||
}
|
||||
|
||||
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: CheckTx")
|
||||
|
||||
@@ -1,56 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func startClient(abciType string) abcicli.Client {
|
||||
// Start client
|
||||
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
|
||||
if err != nil {
|
||||
panic(err.Error())
|
||||
}
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
client.SetLogger(logger.With("module", "abcicli"))
|
||||
if err := client.Start(); err != nil {
|
||||
panicf("connecting to abci_app: %v", err.Error())
|
||||
}
|
||||
|
||||
return client
|
||||
}
|
||||
|
||||
func commit(client abcicli.Client, hashExp []byte) {
|
||||
res, err := client.CommitSync(ctx)
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if !bytes.Equal(res.Data, hashExp) {
|
||||
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
|
||||
}
|
||||
}
|
||||
|
||||
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
|
||||
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if res.Code != codeExp {
|
||||
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
|
||||
}
|
||||
if !bytes.Equal(res.Data, dataExp) {
|
||||
panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
|
||||
}
|
||||
}
|
||||
|
||||
func panicf(format string, a ...interface{}) {
|
||||
panic(fmt.Sprintf(format, a...))
|
||||
}
|
||||
@@ -1,93 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
var abciType string
|
||||
|
||||
func init() {
|
||||
abciType = os.Getenv("ABCI")
|
||||
if abciType == "" {
|
||||
abciType = "socket"
|
||||
}
|
||||
}
|
||||
|
||||
func main() {
|
||||
testCounter()
|
||||
}
|
||||
|
||||
const (
|
||||
maxABCIConnectTries = 10
|
||||
)
|
||||
|
||||
func ensureABCIIsUp(typ string, n int) error {
|
||||
var err error
|
||||
cmdString := "abci-cli echo hello"
|
||||
if typ == "grpc" {
|
||||
cmdString = "abci-cli --abci grpc echo hello"
|
||||
}
|
||||
|
||||
for i := 0; i < n; i++ {
|
||||
cmd := exec.Command("bash", "-c", cmdString)
|
||||
_, err = cmd.CombinedOutput()
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func testCounter() {
|
||||
abciApp := os.Getenv("ABCI_APP")
|
||||
if abciApp == "" {
|
||||
panic("No ABCI_APP specified")
|
||||
}
|
||||
|
||||
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
|
||||
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
|
||||
cmd := exec.Command("bash", "-c", subCommand)
|
||||
cmd.Stdout = os.Stdout
|
||||
if err := cmd.Start(); err != nil {
|
||||
log.Fatalf("starting %q err: %v", abciApp, err)
|
||||
}
|
||||
defer func() {
|
||||
if err := cmd.Process.Kill(); err != nil {
|
||||
log.Printf("error on process kill: %v", err)
|
||||
}
|
||||
if err := cmd.Wait(); err != nil {
|
||||
log.Printf("error while waiting for cmd to exit: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
|
||||
log.Fatalf("echo failed: %v", err) //nolint:gocritic
|
||||
}
|
||||
|
||||
client := startClient(abciType)
|
||||
defer func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
log.Printf("error trying client stop: %v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
// commit(client, nil)
|
||||
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
|
||||
commit(client, nil)
|
||||
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
|
||||
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
|
||||
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
#! /bin/bash
|
||||
set -e
|
||||
|
||||
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
|
||||
|
||||
# Get the directory of where this script is.
|
||||
export PATH="$GOBIN:$PATH"
|
||||
SOURCE="${BASH_SOURCE[0]}"
|
||||
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
|
||||
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
|
||||
|
||||
# Change into that dir because we expect that.
|
||||
cd "$DIR"
|
||||
|
||||
echo "RUN COUNTER OVER SOCKET"
|
||||
# test golang counter
|
||||
ABCI_APP="counter" go run -mod=readonly ./*.go
|
||||
echo "----------------------"
|
||||
|
||||
|
||||
echo "RUN COUNTER OVER GRPC"
|
||||
# test golang counter via grpc
|
||||
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
|
||||
echo "----------------------"
|
||||
|
||||
# test nodejs counter
|
||||
# TODO: fix node app
|
||||
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter
|
||||
@@ -1,3 +1,4 @@
|
||||
set_option serial on
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
deliver_tx 0x00
|
||||
|
||||
@@ -1,3 +1,7 @@
|
||||
> set_option serial on
|
||||
-> code: OK
|
||||
-> log: OK (SetOption doesn't return anything.)
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
@@ -8,16 +12,18 @@
|
||||
-> code: OK
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
-> code: 2
|
||||
-> log: Invalid nonce. Expected >= 1, got 0
|
||||
|
||||
> deliver_tx 0x01
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x04
|
||||
-> code: OK
|
||||
-> code: 2
|
||||
-> log: Invalid nonce. Expected 2, got 4
|
||||
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"hashes":0,"txs":3}
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
|
||||
-> data: {"hashes":0,"txs":2}
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
|
||||
|
||||
|
||||
@@ -37,7 +37,6 @@ function testExample() {
|
||||
}
|
||||
|
||||
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
|
||||
testExample 2 tests/test_cli/ex2.abci abci-cli counter
|
||||
|
||||
echo ""
|
||||
echo "PASS"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"context"
|
||||
context "golang.org/x/net/context"
|
||||
)
|
||||
|
||||
// Application is an interface that enables any finite, deterministic state machine
|
||||
@@ -10,8 +10,9 @@ import (
|
||||
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
|
||||
type Application interface {
|
||||
// Info/Query Connection
|
||||
Info(RequestInfo) ResponseInfo // Return application info
|
||||
Query(RequestQuery) ResponseQuery // Query for state
|
||||
Info(RequestInfo) ResponseInfo // Return application info
|
||||
SetOption(RequestSetOption) ResponseSetOption // Set application option
|
||||
Query(RequestQuery) ResponseQuery // Query for state
|
||||
|
||||
// Mempool Connection
|
||||
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
|
||||
@@ -46,6 +47,10 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
|
||||
return ResponseInfo{}
|
||||
}
|
||||
|
||||
func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
|
||||
return ResponseSetOption{}
|
||||
}
|
||||
|
||||
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
|
||||
return ResponseDeliverTx{Code: CodeTypeOK}
|
||||
}
|
||||
@@ -114,6 +119,11 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) {
|
||||
res := app.app.SetOption(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
|
||||
res := app.app.DeliverTx(*req)
|
||||
return &res, nil
|
||||
|
||||
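For context on the SetOption surface being restored in this diff, here is a minimal application-side sketch. It is not part of the diff: the receiver type and the app.serial field are assumptions, loosely mirroring the counter example's --serial flag.

// Sketch only: an application could interpret a "serial" option the way the
// counter example's --serial flag does. app.serial is an assumed field.
func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption {
	if req.Key == "serial" {
		app.serial = req.Value == "on"
	}
	return types.ResponseSetOption{}
}
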
@@ -48,6 +48,12 @@ func ToRequestInfo(req RequestInfo) *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestSetOption(req RequestSetOption) *Request {
|
||||
return &Request{
|
||||
Value: &Request_SetOption{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
|
||||
return &Request{
|
||||
Value: &Request_DeliverTx{&req},
|
||||
@@ -139,6 +145,13 @@ func ToResponseInfo(res ResponseInfo) *Response {
|
||||
Value: &Response_Info{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseSetOption(res ResponseSetOption) *Response {
|
||||
return &Response{
|
||||
Value: &Response_SetOption{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
|
||||
return &Response{
|
||||
Value: &Response_DeliverTx{&res},
|
||||
|
||||
@@ -42,7 +42,7 @@ func (r ResponseQuery) IsErr() bool {
|
||||
}
|
||||
|
||||
//---------------------------------------------------------------------------
|
||||
// override JSON marshalling so we emit defaults (ie. disable omitempty)
|
||||
// override JSON marshaling so we emit defaults (ie. disable omitempty)
|
||||
|
||||
var (
|
||||
jsonpbMarshaller = jsonpb.Marshaler{
|
||||
@@ -52,6 +52,16 @@ var (
|
||||
jsonpbUnmarshaller = jsonpb.Unmarshaler{}
|
||||
)
|
||||
|
||||
func (r *ResponseSetOption) MarshalJSON() ([]byte, error) {
|
||||
s, err := jsonpbMarshaller.MarshalToString(r)
|
||||
return []byte(s), err
|
||||
}
|
||||
|
||||
func (r *ResponseSetOption) UnmarshalJSON(b []byte) error {
|
||||
reader := bytes.NewBuffer(b)
|
||||
return jsonpbUnmarshaller.Unmarshal(reader, r)
|
||||
}
|
||||
|
||||
func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
|
||||
s, err := jsonpbMarshaller.MarshalToString(r)
|
||||
return []byte(s), err
|
||||
@@ -116,5 +126,6 @@ var _ jsonRoundTripper = (*ResponseCommit)(nil)
|
||||
var _ jsonRoundTripper = (*ResponseQuery)(nil)
|
||||
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
|
||||
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
|
||||
var _ jsonRoundTripper = (*ResponseSetOption)(nil)
|
||||
|
||||
var _ jsonRoundTripper = (*EventAttribute)(nil)
|
||||
|
||||
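As a quick illustration of the jsonRoundTripper contract these assertions enforce, here is a hedged sketch (the helper name is made up, not part of the diff) that exercises the MarshalJSON/UnmarshalJSON pair added for ResponseSetOption above:

// roundTripSetOption is an illustrative helper showing the added
// MarshalJSON/UnmarshalJSON methods being used together.
func roundTripSetOption(in *ResponseSetOption) (*ResponseSetOption, error) {
	bz, err := in.MarshalJSON()
	if err != nil {
		return nil, err
	}
	out := new(ResponseSetOption)
	if err := out.UnmarshalJSON(bz); err != nil {
		return nil, err
	}
	return out, nil
}
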
File diff suppressed because it is too large
appveyor.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
|
||||
version: 1.0.{build}
|
||||
configuration: Release
|
||||
platform:
|
||||
- x64
|
||||
- x86
|
||||
clone_folder: c:\go\path\src\github.com\tendermint\tendermint
|
||||
before_build:
|
||||
- cmd: set GOPATH=%GOROOT%\path
|
||||
- cmd: set PATH=%GOPATH%\bin;%PATH%
|
||||
build_script:
|
||||
- cmd: make test
|
||||
test: off
|
||||
@@ -1,42 +0,0 @@
|
||||
/*
Package behaviour provides a mechanism for reactors to report behaviour of peers.

Instead of a reactor calling the switch directly, it calls the behaviour module, which
handles stopping the peer or marking the peer as good on behalf of the reactor.

There are four different behaviours a reactor can report.

1. bad message

	type badMessage struct {
		explanation string
	}

This message will request the peer be stopped for an error.

2. message out of order

	type messageOutOfOrder struct {
		explanation string
	}

This message will request the peer be stopped for an error.

3. consensus vote

	type consensusVote struct {
		explanation string
	}

This message will request the peer be marked as good.

4. block part

	type blockPart struct {
		explanation string
	}

This message will request the peer be marked as good.
*/
package behaviour
|
||||
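As a rough usage sketch of the mechanism the package doc above describes (not part of the diff: the function, peer ID, and explanation are made up; it assumes it lives in the behaviour package alongside Reporter and BadMessage):

// Sketch only: a reactor holding a Reporter asks for a misbehaving peer to be
// stopped by reporting a bad message.
func reportBadPeer(reporter Reporter, peerID p2p.NodeID) error {
	return reporter.Report(BadMessage(peerID, "malformed vote message"))
}
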
@@ -1,49 +0,0 @@
|
||||
package behaviour
|
||||
|
||||
import (
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// PeerBehaviour is a struct describing a behaviour a peer performed.
|
||||
// `peerID` identifies the peer and reason characterizes the specific
|
||||
// behaviour performed by the peer.
|
||||
type PeerBehaviour struct {
|
||||
peerID p2p.NodeID
|
||||
reason interface{}
|
||||
}
|
||||
|
||||
type badMessage struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
// BadMessage returns a badMessage PeerBehaviour.
|
||||
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
|
||||
}
|
||||
|
||||
type messageOutOfOrder struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
|
||||
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
|
||||
}
|
||||
|
||||
type consensusVote struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
// ConsensusVote returns a consensusVote PeerBehaviour.
|
||||
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
|
||||
}
|
||||
|
||||
type blockPart struct {
|
||||
explanation string
|
||||
}
|
||||
|
||||
// BlockPart returns blockPart PeerBehaviour.
|
||||
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
|
||||
}
|
||||
@@ -1,86 +0,0 @@
|
||||
package behaviour
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// Reporter provides an interface for reactors to report the behaviour
|
||||
// of peers synchronously to other components.
|
||||
type Reporter interface {
|
||||
Report(behaviour PeerBehaviour) error
|
||||
}
|
||||
|
||||
// SwitchReporter reports peer behaviour to an internal Switch.
|
||||
type SwitchReporter struct {
|
||||
sw *p2p.Switch
|
||||
}
|
||||
|
||||
// NewSwitchReporter returns a new SwitchReporter instance which wraps the Switch.
|
||||
func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter {
|
||||
return &SwitchReporter{
|
||||
sw: sw,
|
||||
}
|
||||
}
|
||||
|
||||
// Report reports the behaviour of a peer to the Switch.
|
||||
func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
|
||||
peer := spbr.sw.Peers().Get(behaviour.peerID)
|
||||
if peer == nil {
|
||||
return errors.New("peer not found")
|
||||
}
|
||||
|
||||
switch reason := behaviour.reason.(type) {
|
||||
case consensusVote, blockPart:
|
||||
spbr.sw.MarkPeerAsGood(peer)
|
||||
case badMessage:
|
||||
spbr.sw.StopPeerForError(peer, reason.explanation)
|
||||
case messageOutOfOrder:
|
||||
spbr.sw.StopPeerForError(peer, reason.explanation)
|
||||
default:
|
||||
return errors.New("unknown reason reported")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MockReporter is a concrete implementation of the Reporter
|
||||
// interface used in reactor tests to ensure reactors report the correct
|
||||
// behaviour in manufactured scenarios.
|
||||
type MockReporter struct {
|
||||
mtx tmsync.RWMutex
|
||||
pb map[p2p.NodeID][]PeerBehaviour
|
||||
}
|
||||
|
||||
// NewMockReporter returns a Reporter which records all reported
|
||||
// behaviours in memory.
|
||||
func NewMockReporter() *MockReporter {
|
||||
return &MockReporter{
|
||||
pb: map[p2p.NodeID][]PeerBehaviour{},
|
||||
}
|
||||
}
|
||||
|
||||
// Report stores the PeerBehaviour produced by the peer identified by peerID.
|
||||
func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
|
||||
mpbr.mtx.Lock()
|
||||
defer mpbr.mtx.Unlock()
|
||||
mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetBehaviours returns all behaviours reported on the peer identified by peerID.
|
||||
func (mpbr *MockReporter) GetBehaviours(peerID p2p.NodeID) []PeerBehaviour {
|
||||
mpbr.mtx.RLock()
|
||||
defer mpbr.mtx.RUnlock()
|
||||
if items, ok := mpbr.pb[peerID]; ok {
|
||||
result := make([]PeerBehaviour, len(items))
|
||||
copy(result, items)
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
return []PeerBehaviour{}
|
||||
}
|
||||
@@ -1,205 +0,0 @@
|
||||
package behaviour_test
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
bh "github.com/tendermint/tendermint/behaviour"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
)
|
||||
|
||||
// TestMockReporter tests the MockReporter's ability to store reported
|
||||
// peer behaviour in memory indexed by the peerID.
|
||||
func TestMockReporter(t *testing.T) {
|
||||
var peerID p2p.NodeID = "MockPeer"
|
||||
pr := bh.NewMockReporter()
|
||||
|
||||
behaviours := pr.GetBehaviours(peerID)
|
||||
if len(behaviours) != 0 {
|
||||
t.Error("Expected to have no behaviours reported")
|
||||
}
|
||||
|
||||
badMessage := bh.BadMessage(peerID, "bad message")
|
||||
if err := pr.Report(badMessage); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
behaviours = pr.GetBehaviours(peerID)
|
||||
if len(behaviours) != 1 {
|
||||
t.Error("Expected the peer have one reported behaviour")
|
||||
}
|
||||
|
||||
if behaviours[0] != badMessage {
|
||||
t.Error("Expected Bad Message to have been reported")
|
||||
}
|
||||
}
|
||||
|
||||
type scriptItem struct {
|
||||
peerID p2p.NodeID
|
||||
behaviour bh.PeerBehaviour
|
||||
}
|
||||
|
||||
// equalBehaviours returns true if a and b contain the same PeerBehaviours with
|
||||
// the same frequencies, and false otherwise.
|
||||
func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
|
||||
aHistogram := map[bh.PeerBehaviour]int{}
|
||||
bHistogram := map[bh.PeerBehaviour]int{}
|
||||
|
||||
for _, behaviour := range a {
|
||||
aHistogram[behaviour]++
|
||||
}
|
||||
|
||||
for _, behaviour := range b {
|
||||
bHistogram[behaviour]++
|
||||
}
|
||||
|
||||
if len(aHistogram) != len(bHistogram) {
|
||||
return false
|
||||
}
|
||||
|
||||
for _, behaviour := range a {
|
||||
if aHistogram[behaviour] != bHistogram[behaviour] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, behaviour := range b {
|
||||
if bHistogram[behaviour] != aHistogram[behaviour] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TestEqualPeerBehaviours tests that equalBehaviours correctly compares two
// slices of peer behaviours by the behaviours they contain and the
// frequencies with which those behaviours occur.
|
||||
func TestEqualPeerBehaviours(t *testing.T) {
|
||||
var (
|
||||
peerID p2p.NodeID = "MockPeer"
|
||||
consensusVote = bh.ConsensusVote(peerID, "voted")
|
||||
blockPart = bh.BlockPart(peerID, "blocked")
|
||||
equals = []struct {
|
||||
left []bh.PeerBehaviour
|
||||
right []bh.PeerBehaviour
|
||||
}{
|
||||
// Empty sets
|
||||
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
|
||||
// Single behaviours
|
||||
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
|
||||
// Equal Frequencies
|
||||
{[]bh.PeerBehaviour{consensusVote, consensusVote},
|
||||
[]bh.PeerBehaviour{consensusVote, consensusVote}},
|
||||
// Equal frequencies different orders
|
||||
{[]bh.PeerBehaviour{consensusVote, blockPart},
|
||||
[]bh.PeerBehaviour{blockPart, consensusVote}},
|
||||
}
|
||||
unequals = []struct {
|
||||
left []bh.PeerBehaviour
|
||||
right []bh.PeerBehaviour
|
||||
}{
|
||||
// Comparing empty sets to non empty sets
|
||||
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
|
||||
// Different behaviours
|
||||
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
|
||||
// Same behaviour with different frequencies
|
||||
{[]bh.PeerBehaviour{consensusVote},
|
||||
[]bh.PeerBehaviour{consensusVote, consensusVote}},
|
||||
}
|
||||
)
|
||||
|
||||
for _, test := range equals {
|
||||
if !equalBehaviours(test.left, test.right) {
|
||||
t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
|
||||
}
|
||||
}
|
||||
|
||||
for _, test := range unequals {
|
||||
if equalBehaviours(test.left, test.right) {
|
||||
t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TestPeerBehaviourConcurrency constructs a scenario in which
|
||||
// multiple goroutines are using the same MockReporter instance.
|
||||
// This test reproduces the conditions in which MockReporter will
|
||||
// be used within a Reactor's `Receive` method to ensure thread safety.
|
||||
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
|
||||
var (
|
||||
behaviourScript = []struct {
|
||||
peerID p2p.NodeID
|
||||
behaviours []bh.PeerBehaviour
|
||||
}{
|
||||
{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
|
||||
{"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
|
||||
{
|
||||
"3",
|
||||
[]bh.PeerBehaviour{bh.BlockPart("3", ""),
|
||||
bh.ConsensusVote("3", ""),
|
||||
bh.BlockPart("3", ""),
|
||||
bh.ConsensusVote("3", "")}},
|
||||
{
|
||||
"4",
|
||||
[]bh.PeerBehaviour{bh.ConsensusVote("4", ""),
|
||||
bh.ConsensusVote("4", ""),
|
||||
bh.ConsensusVote("4", ""),
|
||||
bh.ConsensusVote("4", "")}},
|
||||
{
|
||||
"5",
|
||||
[]bh.PeerBehaviour{bh.BlockPart("5", ""),
|
||||
bh.ConsensusVote("5", ""),
|
||||
bh.BlockPart("5", ""),
|
||||
bh.ConsensusVote("5", "")}},
|
||||
}
|
||||
)
|
||||
|
||||
var receiveWg sync.WaitGroup
|
||||
pr := bh.NewMockReporter()
|
||||
scriptItems := make(chan scriptItem)
|
||||
done := make(chan int)
|
||||
numConsumers := 3
|
||||
for i := 0; i < numConsumers; i++ {
|
||||
receiveWg.Add(1)
|
||||
go func() {
|
||||
defer receiveWg.Done()
|
||||
for {
|
||||
select {
|
||||
case pb := <-scriptItems:
|
||||
if err := pr.Report(pb.behaviour); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
case <-done:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
var sendingWg sync.WaitGroup
|
||||
sendingWg.Add(1)
|
||||
go func() {
|
||||
defer sendingWg.Done()
|
||||
for _, item := range behaviourScript {
|
||||
for _, reason := range item.behaviours {
|
||||
scriptItems <- scriptItem{item.peerID, reason}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
sendingWg.Wait()
|
||||
|
||||
for i := 0; i < numConsumers; i++ {
|
||||
done <- 1
|
||||
}
|
||||
|
||||
receiveWg.Wait()
|
||||
|
||||
for _, items := range behaviourScript {
|
||||
reported := pr.GetBehaviours(items.peerID)
|
||||
if !equalBehaviours(reported, items.behaviours) {
|
||||
t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
|
||||
items.peerID, items.behaviours, reported)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,17 +0,0 @@
|
||||
/*
Package blockchain provides two implementations of the fast-sync protocol.

- v0 was the very first implementation. It is battle-tested, but does not have a
  lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.

Check out ADR-40 for the formal model and requirements.

# Termination criteria

1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
   there are no pending requests.

*/
package blockchain
|
||||
@@ -1,12 +0,0 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
MaxMsgSize = types.MaxBlockSizeBytes +
|
||||
bcproto.BlockResponseMessagePrefixSize +
|
||||
bcproto.BlockResponseMessageFieldKeySize
|
||||
)
|
||||
@@ -1,580 +0,0 @@
|
||||
package v0
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var (
|
||||
_ service.Service = (*Reactor)(nil)
|
||||
|
||||
// ChannelShims contains a map of ChannelDescriptorShim objects, where each
|
||||
// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding
|
||||
// p2p proto.Message the new p2p Channel is responsible for handling.
|
||||
//
|
||||
//
|
||||
// TODO: Remove once p2p refactor is complete.
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
|
||||
BlockchainChannel: {
|
||||
MsgType: new(bcproto.Message),
|
||||
Descriptor: &p2p.ChannelDescriptor{
|
||||
ID: byte(BlockchainChannel),
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 1000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockchainChannel is a channel for blocks and status updates
|
||||
BlockchainChannel = p2p.ChannelID(0x40)
|
||||
|
||||
trySyncIntervalMS = 10
|
||||
|
||||
// ask for best height every 10s
|
||||
statusUpdateIntervalSeconds = 10
|
||||
|
||||
// check if we should switch to consensus reactor
|
||||
switchToConsensusIntervalSeconds = 1
|
||||
|
||||
// switch to consensus after this duration of inactivity
|
||||
syncTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
// For when we switch from blockchain reactor and fast sync to the consensus
|
||||
// machine.
|
||||
SwitchToConsensus(state sm.State, skipWAL bool)
|
||||
}
|
||||
|
||||
type peerError struct {
|
||||
err error
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e peerError) Error() string {
|
||||
return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
|
||||
}
|
||||
|
||||
// BlockchainReactor handles long-term catchup syncing.
|
||||
type Reactor struct {
|
||||
service.BaseService
|
||||
|
||||
// immutable
|
||||
initialState sm.State
|
||||
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
pool *BlockPool
|
||||
consReactor consensusReactor
|
||||
fastSync bool
|
||||
|
||||
blockchainCh *p2p.Channel
|
||||
peerUpdates *p2p.PeerUpdatesCh
|
||||
closeCh chan struct{}
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
errorsCh <-chan peerError
|
||||
|
||||
// poolWG is used to synchronize the graceful shutdown of the poolRoutine and
|
||||
// requestRoutine spawned goroutines when stopping the reactor and before
|
||||
// stopping the p2p Channel(s).
|
||||
poolWG sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewReactor returns new reactor instance.
|
||||
func NewReactor(
|
||||
logger log.Logger,
|
||||
state sm.State,
|
||||
blockExec *sm.BlockExecutor,
|
||||
store *store.BlockStore,
|
||||
consReactor consensusReactor,
|
||||
blockchainCh *p2p.Channel,
|
||||
peerUpdates *p2p.PeerUpdatesCh,
|
||||
fastSync bool,
|
||||
) (*Reactor, error) {
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
|
||||
}
|
||||
|
||||
startHeight := store.Height() + 1
|
||||
if startHeight == 1 {
|
||||
startHeight = state.InitialHeight
|
||||
}
|
||||
|
||||
requestsCh := make(chan BlockRequest, maxTotalRequesters)
|
||||
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
|
||||
|
||||
r := &Reactor{
|
||||
initialState: state,
|
||||
blockExec: blockExec,
|
||||
store: store,
|
||||
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
|
||||
consReactor: consReactor,
|
||||
fastSync: fastSync,
|
||||
requestsCh: requestsCh,
|
||||
errorsCh: errorsCh,
|
||||
blockchainCh: blockchainCh,
|
||||
peerUpdates: peerUpdates,
|
||||
closeCh: make(chan struct{}),
|
||||
}
|
||||
|
||||
r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// OnStart starts separate go routines for each p2p Channel and listens for
|
||||
// envelopes on each. In addition, it also listens for peer updates and handles
|
||||
// messages on that p2p channel accordingly. The caller must be sure to execute
|
||||
// OnStop to ensure the outbound p2p Channels are closed.
|
||||
//
|
||||
// If fastSync is enabled, we also start the pool and the pool processing
|
||||
// goroutine. If the pool fails to start, an error is returned.
|
||||
func (r *Reactor) OnStart() error {
|
||||
if r.fastSync {
|
||||
if err := r.pool.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.poolWG.Add(1)
|
||||
go r.poolRoutine(false)
|
||||
}
|
||||
|
||||
go r.processBlockchainCh()
|
||||
go r.processPeerUpdates()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
|
||||
// blocking until they all exit.
|
||||
func (r *Reactor) OnStop() {
|
||||
if r.fastSync {
|
||||
if err := r.pool.Stop(); err != nil {
|
||||
r.Logger.Error("failed to stop pool", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// wait for the poolRoutine and requestRoutine goroutines to gracefully exit
|
||||
r.poolWG.Wait()
|
||||
|
||||
// Close closeCh to signal to all spawned goroutines to gracefully exit. All
|
||||
// p2p Channels should execute Close().
|
||||
close(r.closeCh)
|
||||
|
||||
// Wait for all p2p Channels to be closed before returning. This ensures we
|
||||
// can easily reason about synchronization of all p2p Channels and ensure no
|
||||
// panics will occur.
|
||||
<-r.blockchainCh.Done()
|
||||
<-r.peerUpdates.Done()
|
||||
}
|
||||
|
||||
// respondToPeer loads a block and sends it to the requesting peer, if we have it.
|
||||
// Otherwise, we'll respond saying we do not have it.
|
||||
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
|
||||
block := r.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
blockProto, err := block.ToProto()
|
||||
if err != nil {
|
||||
r.Logger.Error("failed to convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: peerID,
|
||||
Message: &bcproto.BlockResponse{Block: blockProto},
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: peerID,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
}
|
||||
}
|
||||
|
||||
// handleBlockchainMessage handles envelopes sent from peers on the
|
||||
// BlockchainChannel. It returns an error only if the Envelope.Message is unknown
|
||||
// for this channel. This should never be called outside of handleMessage.
|
||||
func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error {
|
||||
logger := r.Logger.With("peer", envelope.From)
|
||||
|
||||
switch msg := envelope.Message.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
r.respondToPeer(msg, envelope.From)
|
||||
|
||||
case *bcproto.BlockResponse:
|
||||
block, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
logger.Error("failed to convert block from proto", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
r.pool.AddBlock(envelope.From, block, block.Size())
|
||||
|
||||
case *bcproto.StatusRequest:
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: envelope.From,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Height: r.store.Height(),
|
||||
Base: r.store.Base(),
|
||||
},
|
||||
}
|
||||
|
||||
case *bcproto.StatusResponse:
|
||||
r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height)
|
||||
|
||||
case *bcproto.NoBlockResponse:
|
||||
logger.Debug("peer does not have the requested block", "height", msg.Height)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("received unknown message: %T", msg)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
|
||||
// It will handle errors and any possible panics gracefully. A caller can handle
|
||||
// any error returned by sending a PeerError on the respective channel.
|
||||
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = fmt.Errorf("panic in processing message: %v", e)
|
||||
r.Logger.Error("recovering from processing message panic", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From)
|
||||
|
||||
switch chID {
|
||||
case BlockchainChannel:
|
||||
err = r.handleBlockchainMessage(envelope)
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// processBlockchainCh initiates a blocking process where we listen for and handle
|
||||
// envelopes on the BlockchainChannel. Any error encountered during message
|
||||
// execution will result in a PeerError being sent on the BlockchainChannel. When
|
||||
// the reactor is stopped, we will catch the signal and close the p2p Channel
|
||||
// gracefully.
|
||||
func (r *Reactor) processBlockchainCh() {
|
||||
defer r.blockchainCh.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case envelope := <-r.blockchainCh.In():
|
||||
if err := r.handleMessage(r.blockchainCh.ID(), envelope); err != nil {
|
||||
r.Logger.Error("failed to process message", "ch_id", r.blockchainCh.ID(), "envelope", envelope, "err", err)
|
||||
r.blockchainCh.Error() <- p2p.PeerError{
|
||||
PeerID: envelope.From,
|
||||
Err: err,
|
||||
Severity: p2p.PeerErrorSeverityLow,
|
||||
}
|
||||
}
|
||||
|
||||
case <-r.closeCh:
|
||||
r.Logger.Debug("stopped listening on blockchain channel; closing...")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processPeerUpdate processes a PeerUpdate.
|
||||
func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
|
||||
r.Logger.Debug("received peer update", "peer", peerUpdate.PeerID, "status", peerUpdate.Status)
|
||||
|
||||
// XXX: Pool#RedoRequest can sometimes give us an empty peer.
|
||||
if len(peerUpdate.PeerID) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
switch peerUpdate.Status {
|
||||
case p2p.PeerStatusNew, p2p.PeerStatusUp:
|
||||
// send a status update to the newly added peer
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: peerUpdate.PeerID,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: r.store.Base(),
|
||||
Height: r.store.Height(),
|
||||
},
|
||||
}
|
||||
|
||||
case p2p.PeerStatusDown, p2p.PeerStatusRemoved, p2p.PeerStatusBanned:
|
||||
r.pool.RemovePeer(peerUpdate.PeerID)
|
||||
}
|
||||
}

// processPeerUpdates initiates a blocking process where we listen for and handle
// PeerUpdate messages. When the reactor is stopped, we will catch the signal and
// close the p2p PeerUpdatesCh gracefully.
func (r *Reactor) processPeerUpdates() {
	defer r.peerUpdates.Close()

	for {
		select {
		case peerUpdate := <-r.peerUpdates.Updates():
			r.processPeerUpdate(peerUpdate)

		case <-r.closeCh:
			r.Logger.Debug("stopped listening on peer updates channel; closing...")
			return
		}
	}
}

// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToFastSync(state sm.State) error {
	r.fastSync = true
	r.initialState = state
	r.pool.height = state.LastBlockHeight + 1

	if err := r.pool.Start(); err != nil {
		return err
	}

	r.poolWG.Add(1)
	go r.poolRoutine(true)

	return nil
}
|
||||
|
||||
func (r *Reactor) requestRoutine() {
|
||||
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
|
||||
defer statusUpdateTicker.Stop()
|
||||
|
||||
r.poolWG.Add(1)
|
||||
defer r.poolWG.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-r.closeCh:
|
||||
return
|
||||
|
||||
case <-r.pool.Quit():
|
||||
return
|
||||
|
||||
case request := <-r.requestsCh:
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: request.PeerID,
|
||||
Message: &bcproto.BlockRequest{Height: request.Height},
|
||||
}
|
||||
|
||||
case pErr := <-r.errorsCh:
|
||||
r.blockchainCh.Error() <- p2p.PeerError{
|
||||
PeerID: pErr.peerID,
|
||||
Err: pErr.err,
|
||||
Severity: p2p.PeerErrorSeverityLow,
|
||||
}
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
r.poolWG.Add(1)
|
||||
|
||||
go func() {
|
||||
defer r.poolWG.Done()
|
||||
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
Broadcast: true,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// poolRoutine handles messages from the poolReactor telling the reactor what to
|
||||
// do.
|
||||
//
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
func (r *Reactor) poolRoutine(stateSynced bool) {
|
||||
var (
|
||||
trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
|
||||
|
||||
blocksSynced = uint64(0)
|
||||
|
||||
chainID = r.initialState.ChainID
|
||||
state = r.initialState
|
||||
|
||||
lastHundred = time.Now()
|
||||
lastRate = 0.0
|
||||
|
||||
didProcessCh = make(chan struct{}, 1)
|
||||
)
|
||||
|
||||
defer trySyncTicker.Stop()
|
||||
defer switchToConsensusTicker.Stop()
|
||||
|
||||
go r.requestRoutine()
|
||||
|
||||
defer r.poolWG.Done()
|
||||
|
||||
FOR_LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-switchToConsensusTicker.C:
|
||||
var (
|
||||
height, numPending, lenRequesters = r.pool.GetStatus()
|
||||
lastAdvance = r.pool.LastAdvance()
|
||||
)
|
||||
|
||||
r.Logger.Debug(
|
||||
"consensus ticker",
|
||||
"num_pending", numPending,
|
||||
"total", lenRequesters,
|
||||
"height", height,
|
||||
)
|
||||
|
||||
switch {
|
||||
case r.pool.IsCaughtUp():
|
||||
r.Logger.Info("switching to consensus reactor", "height", height)
|
||||
|
||||
case time.Since(lastAdvance) > syncTimeout:
|
||||
r.Logger.Error("no progress since last advance", "last_advance", lastAdvance)
|
||||
|
||||
default:
|
||||
r.Logger.Info(
|
||||
"not caught up yet",
|
||||
"height", height,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"timeout_in", syncTimeout-time.Since(lastAdvance),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
if err := r.pool.Stop(); err != nil {
|
||||
r.Logger.Error("failed to stop pool", "err", err)
|
||||
}
|
||||
|
||||
if r.consReactor != nil {
|
||||
r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
|
||||
}
|
||||
|
||||
break FOR_LOOP
|
||||
|
||||
case <-trySyncTicker.C:
|
||||
select {
|
||||
case didProcessCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
case <-didProcessCh:
|
||||
// NOTE: It is a subtle mistake to process more than a single block at a
|
||||
// time (e.g. 10) here, because we only send one BlockRequest per loop
|
||||
// iteration. The ratio mismatch can result in block starvation, i.e. a
|
||||
// sudden burst of requests and responses, and repeat. Consequently, it is
|
||||
// better to split these routines rather than coupling them as it is
|
||||
// written here.
|
||||
//
|
||||
// TODO: Uncouple from request routine.
|
||||
|
||||
// see if there are any blocks to sync
|
||||
first, second := r.pool.PeekTwoBlocks()
|
||||
if first == nil || second == nil {
|
||||
// we need both to sync the first block
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
// try again quickly next loop
|
||||
didProcessCh <- struct{}{}
|
||||
}
|
||||
|
||||
var (
|
||||
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstPartSetHeader = firstParts.Header()
|
||||
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
)
|
||||
|
||||
// Finally, verify the first block using the second's commit.
|
||||
//
|
||||
// NOTE: We can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
if err != nil {
|
||||
err = fmt.Errorf("invalid last commit: %w", err)
|
||||
r.Logger.Error(
|
||||
err.Error(),
|
||||
"last_commit", second.LastCommit,
|
||||
"block_id", firstID,
|
||||
"height", first.Height,
|
||||
)
|
||||
|
||||
// NOTE: We've already removed the peer's request, but we still need
|
||||
// to clean up the rest.
|
||||
peerID := r.pool.RedoRequest(first.Height)
|
||||
r.blockchainCh.Error() <- p2p.PeerError{
|
||||
PeerID: peerID,
|
||||
Err: err,
|
||||
Severity: p2p.PeerErrorSeverityLow,
|
||||
}
|
||||
|
||||
peerID2 := r.pool.RedoRequest(second.Height)
|
||||
if peerID2 != peerID {
|
||||
r.blockchainCh.Error() <- p2p.PeerError{
|
||||
PeerID: peerID2,
|
||||
Err: err,
|
||||
Severity: p2p.PeerErrorSeverityLow,
|
||||
}
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
r.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we do not persist to disk every block
|
||||
r.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
var err error
|
||||
|
||||
// TODO: Same thing for app - but we would need a way to get the hash
|
||||
// without persisting the state.
|
||||
state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO: This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
r.Logger.Info(
|
||||
"fast sync rate",
|
||||
"height", r.pool.height,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"blocks/s", lastRate,
|
||||
)
|
||||
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
|
||||
case <-r.closeCh:
|
||||
break FOR_LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,405 +0,0 @@
|
||||
package v0
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool/mock"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
type reactorTestSuite struct {
|
||||
reactor *Reactor
|
||||
app proxy.AppConns
|
||||
|
||||
peerID p2p.NodeID
|
||||
|
||||
blockchainChannel *p2p.Channel
|
||||
blockchainInCh chan p2p.Envelope
|
||||
blockchainOutCh chan p2p.Envelope
|
||||
blockchainPeerErrCh chan p2p.PeerError
|
||||
|
||||
peerUpdatesCh chan p2p.PeerUpdate
|
||||
peerUpdates *p2p.PeerUpdatesCh
|
||||
}
|
||||
|
||||
func setup(
|
||||
t *testing.T,
|
||||
genDoc *types.GenesisDoc,
|
||||
privVals []types.PrivValidator,
|
||||
maxBlockHeight int64,
|
||||
chBuf uint,
|
||||
) *reactorTestSuite {
|
||||
t.Helper()
|
||||
|
||||
require.Len(t, privVals, 1, "only one validator can be supported")
|
||||
|
||||
app := &abci.BaseApplication{}
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
|
||||
proxyApp := proxy.NewAppConns(cc)
|
||||
require.NoError(t, proxyApp.Start())
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
require.NoError(t, err)
|
||||
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db)
|
||||
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.TestingLogger(),
|
||||
proxyApp.Consensus(),
|
||||
mock.Mempool{},
|
||||
sm.EmptyEvidencePool{},
|
||||
)
|
||||
require.NoError(t, stateStore.Save(state))
|
||||
|
||||
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
|
||||
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
|
||||
|
||||
if blockHeight > 1 {
|
||||
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
|
||||
lastBlock := blockStore.LoadBlock(blockHeight - 1)
|
||||
|
||||
vote, err := types.MakeVote(
|
||||
lastBlock.Header.Height,
|
||||
lastBlockMeta.BlockID,
|
||||
state.Validators,
|
||||
privVals[0],
|
||||
lastBlock.Header.ChainID,
|
||||
time.Now(),
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
lastCommit = types.NewCommit(
|
||||
vote.Height,
|
||||
vote.Round,
|
||||
lastBlockMeta.BlockID,
|
||||
[]types.CommitSig{vote.CommitSig()},
|
||||
)
|
||||
}
|
||||
|
||||
thisBlock := makeBlock(blockHeight, state, lastCommit)
|
||||
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
|
||||
|
||||
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
|
||||
require.NoError(t, err)
|
||||
|
||||
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
|
||||
}
|
||||
|
||||
pID := make([]byte, 16)
|
||||
_, err = rng.Read(pID)
|
||||
require.NoError(t, err)
|
||||
|
||||
peerUpdatesCh := make(chan p2p.PeerUpdate, chBuf)
|
||||
|
||||
rts := &reactorTestSuite{
|
||||
app: proxyApp,
|
||||
blockchainInCh: make(chan p2p.Envelope, chBuf),
|
||||
blockchainOutCh: make(chan p2p.Envelope, chBuf),
|
||||
blockchainPeerErrCh: make(chan p2p.PeerError, chBuf),
|
||||
peerUpdatesCh: peerUpdatesCh,
|
||||
peerUpdates: p2p.NewPeerUpdates(peerUpdatesCh),
|
||||
peerID: p2p.NodeID(fmt.Sprintf("%x", pID)),
|
||||
}
|
||||
|
||||
rts.blockchainChannel = p2p.NewChannel(
|
||||
BlockchainChannel,
|
||||
new(bcproto.Message),
|
||||
rts.blockchainInCh,
|
||||
rts.blockchainOutCh,
|
||||
rts.blockchainPeerErrCh,
|
||||
)
|
||||
|
||||
reactor, err := NewReactor(
|
||||
log.TestingLogger().With("module", "blockchain", "node", rts.peerID),
|
||||
state.Copy(),
|
||||
blockExec,
|
||||
blockStore,
|
||||
nil,
|
||||
rts.blockchainChannel,
|
||||
rts.peerUpdates,
|
||||
fastSync,
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
rts.reactor = reactor
|
||||
|
||||
require.NoError(t, rts.reactor.Start())
|
||||
require.True(t, rts.reactor.IsRunning())
|
||||
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, rts.reactor.Stop())
|
||||
require.NoError(t, rts.app.Stop())
|
||||
require.False(t, rts.reactor.IsRunning())
|
||||
})
|
||||
|
||||
return rts
|
||||
}
|
||||
|
||||
func simulateRouter(primary *reactorTestSuite, suites []*reactorTestSuite, dropChErr bool) {
|
||||
// create a mapping for efficient suite lookup by peer ID
|
||||
suitesByPeerID := make(map[p2p.NodeID]*reactorTestSuite)
|
||||
for _, suite := range suites {
|
||||
suitesByPeerID[suite.peerID] = suite
|
||||
}
|
||||
|
||||
// Simulate a router by listening for all outbound envelopes and proxying the
|
||||
// envelope to the respective peer (suite).
|
||||
go func() {
|
||||
for envelope := range primary.blockchainOutCh {
|
||||
if envelope.Broadcast {
|
||||
for _, s := range suites {
|
||||
// broadcast to everyone except source
|
||||
if s.peerID != primary.peerID {
|
||||
s.blockchainInCh <- p2p.Envelope{
|
||||
From: primary.peerID,
|
||||
To: s.peerID,
|
||||
Message: envelope.Message,
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
suitesByPeerID[envelope.To].blockchainInCh <- p2p.Envelope{
|
||||
From: primary.peerID,
|
||||
To: envelope.To,
|
||||
Message: envelope.Message,
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for pErr := range primary.blockchainPeerErrCh {
|
||||
if dropChErr {
|
||||
primary.reactor.Logger.Debug("dropped peer error", "err", pErr.Err)
|
||||
} else {
|
||||
primary.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
PeerID: pErr.PeerID,
|
||||
Status: p2p.PeerStatusRemoved,
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func TestReactor_AbruptDisconnect(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
|
||||
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
|
||||
maxBlockHeight := int64(64)
|
||||
testSuites := []*reactorTestSuite{
|
||||
setup(t, genDoc, privVals, maxBlockHeight, 0),
|
||||
setup(t, genDoc, privVals, 0, 0),
|
||||
}
|
||||
|
||||
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
|
||||
|
||||
for _, s := range testSuites {
|
||||
simulateRouter(s, testSuites, true)
|
||||
|
||||
// connect reactor to every other reactor
|
||||
for _, ss := range testSuites {
|
||||
if s.peerID != ss.peerID {
|
||||
s.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusUp,
|
||||
PeerID: ss.peerID,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
secondaryPool := testSuites[1].reactor.pool
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool {
|
||||
height, _, _ := secondaryPool.GetStatus()
|
||||
return secondaryPool.MaxPeerHeight() > 0 && height > 0 && height < 10
|
||||
},
|
||||
10*time.Second,
|
||||
10*time.Millisecond,
|
||||
"expected node to be partially synced",
|
||||
)
|
||||
|
||||
// Remove synced node from the syncing node which should not result in any
|
||||
// deadlocks or race conditions within the context of poolRoutine.
|
||||
testSuites[1].peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusDown,
|
||||
PeerID: testSuites[0].peerID,
|
||||
}
|
||||
}
|
||||
|
||||
func TestReactor_NoBlockResponse(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
|
||||
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
|
||||
maxBlockHeight := int64(65)
|
||||
testSuites := []*reactorTestSuite{
|
||||
setup(t, genDoc, privVals, maxBlockHeight, 0),
|
||||
setup(t, genDoc, privVals, 0, 0),
|
||||
}
|
||||
|
||||
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
|
||||
|
||||
for _, s := range testSuites {
|
||||
simulateRouter(s, testSuites, true)
|
||||
|
||||
// connect reactor to every other reactor
|
||||
for _, ss := range testSuites {
|
||||
if s.peerID != ss.peerID {
|
||||
s.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusUp,
|
||||
PeerID: ss.peerID,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
height int64
|
||||
existent bool
|
||||
}{
|
||||
{maxBlockHeight + 2, false},
|
||||
{10, true},
|
||||
{1, true},
|
||||
{100, false},
|
||||
}
|
||||
|
||||
secondaryPool := testSuites[1].reactor.pool
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool { return secondaryPool.MaxPeerHeight() > 0 && secondaryPool.IsCaughtUp() },
|
||||
10*time.Second,
|
||||
10*time.Millisecond,
|
||||
"expected node to be fully synced",
|
||||
)
|
||||
|
||||
for _, tc := range testCases {
|
||||
block := testSuites[1].reactor.store.LoadBlock(tc.height)
|
||||
if tc.existent {
|
||||
require.True(t, block != nil)
|
||||
} else {
|
||||
require.Nil(t, block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReactor_BadBlockStopsPeer(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
|
||||
maxBlockHeight := int64(48)
|
||||
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
|
||||
|
||||
testSuites := []*reactorTestSuite{
|
||||
setup(t, genDoc, privVals, maxBlockHeight, 1000), // fully synced node
|
||||
setup(t, genDoc, privVals, 0, 1000),
|
||||
setup(t, genDoc, privVals, 0, 1000),
|
||||
setup(t, genDoc, privVals, 0, 1000),
|
||||
setup(t, genDoc, privVals, 0, 1000), // new node
|
||||
}
|
||||
|
||||
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
|
||||
|
||||
for _, s := range testSuites[:len(testSuites)-1] {
|
||||
simulateRouter(s, testSuites, true)
|
||||
|
||||
// connect reactor to every other reactor except the new node
|
||||
for _, ss := range testSuites[:len(testSuites)-1] {
|
||||
if s.peerID != ss.peerID {
|
||||
s.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusUp,
|
||||
PeerID: ss.peerID,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool {
|
||||
caughtUp := true
|
||||
for _, s := range testSuites[1 : len(testSuites)-1] {
|
||||
if s.reactor.pool.MaxPeerHeight() == 0 || !s.reactor.pool.IsCaughtUp() {
|
||||
caughtUp = false
|
||||
}
|
||||
}
|
||||
|
||||
return caughtUp
|
||||
},
|
||||
10*time.Minute,
|
||||
10*time.Millisecond,
|
||||
"expected all nodes to be fully synced",
|
||||
)
|
||||
|
||||
for _, s := range testSuites[:len(testSuites)-1] {
|
||||
require.Len(t, s.reactor.pool.peers, 3)
|
||||
}
|
||||
|
||||
// Mark testSuites[3] as an invalid peer which will cause newSuite to disconnect
|
||||
// from this peer.
|
||||
otherGenDoc, otherPrivVals := randGenesisDoc(config, 1, false, 30)
|
||||
otherSuite := setup(t, otherGenDoc, otherPrivVals, maxBlockHeight, 0)
|
||||
testSuites[3].reactor.store = otherSuite.reactor.store
|
||||
|
||||
// add a fake peer just so we do not wait for the consensus ticker to timeout
|
||||
otherSuite.reactor.pool.SetPeerRange("00ff", 10, 10)
|
||||
|
||||
// start the new peer's faux router
|
||||
newSuite := testSuites[len(testSuites)-1]
|
||||
simulateRouter(newSuite, testSuites, false)
|
||||
|
||||
// connect all nodes to the new peer
|
||||
for _, s := range testSuites[:len(testSuites)-1] {
|
||||
newSuite.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusUp,
|
||||
PeerID: s.peerID,
|
||||
}
|
||||
}
|
||||
|
||||
// wait for the new peer to catch up and become fully synced
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool { return newSuite.reactor.pool.MaxPeerHeight() > 0 && newSuite.reactor.pool.IsCaughtUp() },
|
||||
10*time.Minute,
|
||||
10*time.Millisecond,
|
||||
"expected new node to be fully synced",
|
||||
)
|
||||
|
||||
require.Eventuallyf(
|
||||
t,
|
||||
func() bool { return len(newSuite.reactor.pool.peers) < len(testSuites)-1 },
|
||||
10*time.Minute,
|
||||
10*time.Millisecond,
|
||||
"invalid number of peers; expected < %d, got: %d",
|
||||
len(testSuites)-1,
|
||||
len(newSuite.reactor.pool.peers),
|
||||
)
|
||||
}
|
||||
@@ -1,50 +0,0 @@
|
||||
package v0
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
)
|
||||
|
||||
func randGenesisDoc(
|
||||
config *cfg.Config,
|
||||
numValidators int,
|
||||
randPower bool,
|
||||
minPower int64,
|
||||
) (*types.GenesisDoc, []types.PrivValidator) {
|
||||
validators := make([]types.GenesisValidator, numValidators)
|
||||
privValidators := make([]types.PrivValidator, numValidators)
|
||||
|
||||
for i := 0; i < numValidators; i++ {
|
||||
val, privVal := types.RandValidator(randPower, minPower)
|
||||
validators[i] = types.GenesisValidator{
|
||||
PubKey: val.PubKey,
|
||||
Power: val.VotingPower,
|
||||
}
|
||||
|
||||
privValidators[i] = privVal
|
||||
}
|
||||
|
||||
sort.Sort(types.PrivValidatorsByAddress(privValidators))
|
||||
|
||||
return &types.GenesisDoc{
|
||||
GenesisTime: tmtime.Now(),
|
||||
ChainID: config.ChainID(),
|
||||
Validators: validators,
|
||||
}, privValidators
|
||||
}
|
||||
|
||||
func makeTxs(height int64) (txs []types.Tx) {
|
||||
for i := 0; i < 10; i++ {
|
||||
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
@@ -1,187 +0,0 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var (
|
||||
errPeerQueueFull = errors.New("peer queue full")
|
||||
)
|
||||
|
||||
type iIO interface {
|
||||
sendBlockRequest(peer p2p.Peer, height int64) error
|
||||
sendBlockToPeer(block *types.Block, peer p2p.Peer) error
|
||||
sendBlockNotFound(height int64, peer p2p.Peer) error
|
||||
sendStatusResponse(base, height int64, peer p2p.Peer) error
|
||||
|
||||
sendStatusRequest(peer p2p.Peer) error
|
||||
broadcastStatusRequest() error
|
||||
|
||||
trySwitchToConsensus(state state.State, skipWAL bool) bool
|
||||
}
|
||||
|
||||
type switchIO struct {
|
||||
sw *p2p.Switch
|
||||
}
|
||||
|
||||
func newSwitchIo(sw *p2p.Switch) *switchIO {
|
||||
return &switchIO{
|
||||
sw: sw,
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
|
||||
BlockchainChannel = byte(0x40)
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
// for when we switch from blockchain reactor and fast sync to
|
||||
// the consensus machine
|
||||
SwitchToConsensus(state state.State, skipWAL bool)
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_BlockRequest{
|
||||
BlockRequest: &bcproto.BlockRequest{
|
||||
Height: height,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
return errPeerQueueFull
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_StatusResponse{
|
||||
StatusResponse: &bcproto.StatusResponse{
|
||||
Height: height,
|
||||
Base: base,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return errPeerQueueFull
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error {
|
||||
if block == nil {
|
||||
panic("trying to send nil block")
|
||||
}
|
||||
|
||||
bpb, err := block.ToProto()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_BlockResponse{
|
||||
BlockResponse: &bcproto.BlockResponse{
|
||||
Block: bpb,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return errPeerQueueFull
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_NoBlockResponse{
|
||||
NoBlockResponse: &bcproto.NoBlockResponse{
|
||||
Height: height,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return errPeerQueueFull
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool {
|
||||
conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor)
|
||||
if ok {
|
||||
conR.SwitchToConsensus(state, skipWAL)
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_StatusRequest{
|
||||
StatusRequest: &bcproto.StatusRequest{},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return errPeerQueueFull
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) broadcastStatusRequest() error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_StatusRequest{
|
||||
StatusRequest: &bcproto.StatusRequest{},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// XXX: maybe we should use an io specific peer list here
|
||||
sio.sw.Broadcast(BlockchainChannel, msgBytes)
|
||||
|
||||
return nil
|
||||
}
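
// Each sender above marshals a bcproto.Message and pushes the bytes with
// peer.TrySend. An illustrative helper along these lines (a sketch, not part
// of the original io.go) would factor out that repetition:
func (sio *switchIO) trySendProto(peer p2p.Peer, msg *bcproto.Message) error {
	msgBytes, err := proto.Marshal(msg)
	if err != nil {
		return err
	}
	if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
		return errPeerQueueFull
	}
	return nil
}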
|
||||
@@ -1,125 +0,0 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"github.com/go-kit/kit/metrics"
|
||||
"github.com/go-kit/kit/metrics/discard"
|
||||
"github.com/go-kit/kit/metrics/prometheus"
|
||||
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
const (
|
||||
// MetricsSubsystem is a subsystem shared by all metrics exposed by this
|
||||
// package.
|
||||
MetricsSubsystem = "blockchain"
|
||||
)
|
||||
|
||||
// Metrics contains metrics exposed by this package.
type Metrics struct {
	// events_in
	EventsIn metrics.Counter
	// events_handled
	EventsHandled metrics.Counter
	// events_out
	EventsOut metrics.Counter
	// errors_in
	ErrorsIn metrics.Counter
	// errors_handled
	ErrorsHandled metrics.Counter
	// errors_out
	ErrorsOut metrics.Counter
	// events_shed
	EventsShed metrics.Counter
	// events_sent
	EventsSent metrics.Counter
	// errors_sent
	ErrorsSent metrics.Counter
	// errors_shed
	ErrorsShed metrics.Counter
}
|
||||
|
||||
// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines.
|
||||
// Can we burn in the routine name here?
|
||||
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
|
||||
labels := []string{}
|
||||
for i := 0; i < len(labelsAndValues); i += 2 {
|
||||
labels = append(labels, labelsAndValues[i])
|
||||
}
|
||||
return &Metrics{
|
||||
EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "events_in",
|
||||
Help: "Events read from the channel.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "events_handled",
|
||||
Help: "Events handled",
|
||||
}, labels).With(labelsAndValues...),
|
||||
EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "events_out",
|
||||
Help: "Events output from routine.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "errors_in",
|
||||
Help: "Errors read from the channel.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "errors_handled",
|
||||
Help: "Errors handled.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "errors_out",
|
||||
Help: "Errors output from routine.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "errors_sent",
|
||||
Help: "Errors sent to routine.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "errors_shed",
|
||||
Help: "Errors dropped from sending.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "events_sent",
|
||||
Help: "Events sent to routine.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
|
||||
Namespace: namespace,
|
||||
Subsystem: MetricsSubsystem,
|
||||
Name: "events_shed",
|
||||
Help: "Events dropped from sending.",
|
||||
}, labels).With(labelsAndValues...),
|
||||
}
|
||||
}
|
||||
|
||||
// NopMetrics returns no-op Metrics.
|
||||
func NopMetrics() *Metrics {
|
||||
return &Metrics{
|
||||
EventsIn: discard.NewCounter(),
|
||||
EventsHandled: discard.NewCounter(),
|
||||
EventsOut: discard.NewCounter(),
|
||||
ErrorsIn: discard.NewCounter(),
|
||||
ErrorsHandled: discard.NewCounter(),
|
||||
ErrorsOut: discard.NewCounter(),
|
||||
EventsShed: discard.NewCounter(),
|
||||
EventsSent: discard.NewCounter(),
|
||||
ErrorsSent: discard.NewCounter(),
|
||||
ErrorsShed: discard.NewCounter(),
|
||||
}
|
||||
}
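
// exampleMetricsUsage is an illustrative usage sketch (an assumption, not part
// of the original metrics.go); the namespace and the label pair are example
// values only.
func exampleMetricsUsage() {
	m := PrometheusMetrics("tendermint", "chain_id", "test-chain")
	m.EventsIn.Add(1)   // count one event read from the channel
	m.ErrorsShed.Add(1) // count one error dropped from sending
}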
|
||||
@@ -1,192 +0,0 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmState "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Events generated by the processor:
|
||||
// block execution failure, event will indicate the peer(s) that caused the error
|
||||
type pcBlockVerificationFailure struct {
|
||||
priorityNormal
|
||||
height int64
|
||||
firstPeerID p2p.NodeID
|
||||
secondPeerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e pcBlockVerificationFailure) String() string {
|
||||
return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}",
|
||||
e.height, e.firstPeerID, e.secondPeerID)
|
||||
}
|
||||
|
||||
// successful block execution
|
||||
type pcBlockProcessed struct {
|
||||
priorityNormal
|
||||
height int64
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e pcBlockProcessed) String() string {
|
||||
return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID)
|
||||
}
|
||||
|
||||
// processor has finished
|
||||
type pcFinished struct {
|
||||
priorityNormal
|
||||
blocksSynced int
|
||||
tmState tmState.State
|
||||
}
|
||||
|
||||
func (p pcFinished) Error() string {
|
||||
return "finished"
|
||||
}
|
||||
|
||||
type queueItem struct {
|
||||
block *types.Block
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
type blockQueue map[int64]queueItem
|
||||
|
||||
type pcState struct {
|
||||
// blocks waiting to be processed
|
||||
queue blockQueue
|
||||
|
||||
// draining indicates that the next rProcessBlock event with a queue miss constitutes completion
|
||||
draining bool
|
||||
|
||||
// the number of blocks successfully synced by the processor
|
||||
blocksSynced int
|
||||
|
||||
// the processorContext which contains the processor dependencies
|
||||
context processorContext
|
||||
}
|
||||
|
||||
func (state *pcState) String() string {
|
||||
return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d",
|
||||
state.height(), len(state.queue), state.draining, state.blocksSynced)
|
||||
}
|
||||
|
||||
// newPcState returns an initial pcState with an empty block queue
|
||||
func newPcState(context processorContext) *pcState {
|
||||
return &pcState{
|
||||
queue: blockQueue{},
|
||||
draining: false,
|
||||
blocksSynced: 0,
|
||||
context: context,
|
||||
}
|
||||
}
|
||||
|
||||
// nextTwo returns the next two unverified blocks
|
||||
func (state *pcState) nextTwo() (queueItem, queueItem, error) {
|
||||
if first, ok := state.queue[state.height()+1]; ok {
|
||||
if second, ok := state.queue[state.height()+2]; ok {
|
||||
return first, second, nil
|
||||
}
|
||||
}
|
||||
return queueItem{}, queueItem{}, fmt.Errorf("not found")
|
||||
}
|
||||
|
||||
// synced returns true when at most the last verified block remains in the queue
|
||||
func (state *pcState) synced() bool {
|
||||
return len(state.queue) <= 1
|
||||
}
|
||||
|
||||
func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
|
||||
if item, ok := state.queue[height]; ok {
|
||||
panic(fmt.Sprintf(
|
||||
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
|
||||
height, block.Hash(), peerID, item.block.Hash(), item.peerID))
|
||||
}
|
||||
|
||||
state.queue[height] = queueItem{block: block, peerID: peerID}
|
||||
}
|
||||
|
||||
func (state *pcState) height() int64 {
|
||||
return state.context.tmState().LastBlockHeight
|
||||
}
|
||||
|
||||
// purgePeer removes all of the given peer's unprocessed blocks from the queue
|
||||
func (state *pcState) purgePeer(peerID p2p.NodeID) {
|
||||
// what if height is less than state.height?
|
||||
for height, item := range state.queue {
|
||||
if item.peerID == peerID {
|
||||
delete(state.queue, height)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handle processes FSM events
|
||||
func (state *pcState) handle(event Event) (Event, error) {
|
||||
switch event := event.(type) {
|
||||
case bcResetState:
|
||||
state.context.setState(event.state)
|
||||
return noOp, nil
|
||||
|
||||
case scFinishedEv:
|
||||
if state.synced() {
|
||||
return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil
|
||||
}
|
||||
state.draining = true
|
||||
return noOp, nil
|
||||
|
||||
case scPeerError:
|
||||
state.purgePeer(event.peerID)
|
||||
return noOp, nil
|
||||
|
||||
case scBlockReceived:
|
||||
if event.block == nil {
|
||||
return noOp, nil
|
||||
}
|
||||
|
||||
// enqueue block if height is higher than state height, else ignore it
|
||||
if event.block.Height > state.height() {
|
||||
state.enqueue(event.peerID, event.block, event.block.Height)
|
||||
}
|
||||
return noOp, nil
|
||||
|
||||
case rProcessBlock:
|
||||
tmState := state.context.tmState()
|
||||
firstItem, secondItem, err := state.nextTwo()
|
||||
if err != nil {
|
||||
if state.draining {
|
||||
return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil
|
||||
}
|
||||
return noOp, nil
|
||||
}
|
||||
|
||||
var (
|
||||
first, second = firstItem.block, secondItem.block
|
||||
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
|
||||
)
|
||||
|
||||
// verify if +second+ last commit "confirms" +first+ block
|
||||
err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit)
|
||||
if err != nil {
|
||||
state.purgePeer(firstItem.peerID)
|
||||
if firstItem.peerID != secondItem.peerID {
|
||||
state.purgePeer(secondItem.peerID)
|
||||
}
|
||||
return pcBlockVerificationFailure{
|
||||
height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID},
|
||||
nil
|
||||
}
|
||||
|
||||
state.context.saveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
if err := state.context.applyBlock(firstID, first); err != nil {
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
|
||||
delete(state.queue, first.Height)
|
||||
state.blocksSynced++
|
||||
|
||||
return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil
|
||||
}
|
||||
|
||||
return noOp, nil
|
||||
}
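
// An illustrative sketch of driving the processor FSM directly (assumed wiring;
// block1 and block2 stand for *types.Block values at heights H+1 and H+2):
//
//	state := newPcState(newProcessorContext(store, applier, tmState))
//	state.handle(scBlockReceived{peerID: "P1", block: block1})
//	state.handle(scBlockReceived{peerID: "P1", block: block2})
//	next, err := state.handle(rProcessBlock{}) // verifies block1 with block2's LastCommit, then applies it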
|
||||
@@ -1,100 +0,0 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type processorContext interface {
|
||||
applyBlock(blockID types.BlockID, block *types.Block) error
|
||||
verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
|
||||
saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
|
||||
tmState() state.State
|
||||
setState(state.State)
|
||||
}
|
||||
|
||||
type pContext struct {
|
||||
store blockStore
|
||||
applier blockApplier
|
||||
state state.State
|
||||
}
|
||||
|
||||
func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContext {
|
||||
return &pContext{
|
||||
store: st,
|
||||
applier: ex,
|
||||
state: s,
|
||||
}
|
||||
}
|
||||
|
||||
func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
|
||||
newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block)
|
||||
pc.state = newState
|
||||
return err
|
||||
}
|
||||
|
||||
func (pc pContext) tmState() state.State {
|
||||
return pc.state
|
||||
}
|
||||
|
||||
func (pc *pContext) setState(state state.State) {
|
||||
pc.state = state
|
||||
}
|
||||
|
||||
func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
|
||||
return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit)
|
||||
}
|
||||
|
||||
func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
|
||||
pc.store.SaveBlock(block, blockParts, seenCommit)
|
||||
}
|
||||
|
||||
type mockPContext struct {
|
||||
applicationBL []int64
|
||||
verificationBL []int64
|
||||
state state.State
|
||||
}
|
||||
|
||||
func newMockProcessorContext(
|
||||
state state.State,
|
||||
verificationBlackList []int64,
|
||||
applicationBlackList []int64) *mockPContext {
|
||||
return &mockPContext{
|
||||
applicationBL: applicationBlackList,
|
||||
verificationBL: verificationBlackList,
|
||||
state: state,
|
||||
}
|
||||
}
|
||||
|
||||
func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error {
|
||||
for _, h := range mpc.applicationBL {
|
||||
if h == block.Height {
|
||||
return fmt.Errorf("generic application error")
|
||||
}
|
||||
}
|
||||
mpc.state.LastBlockHeight = block.Height
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
|
||||
for _, h := range mpc.verificationBL {
|
||||
if h == height {
|
||||
return fmt.Errorf("generic verification error")
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
|
||||
|
||||
}
|
||||
|
||||
func (mpc *mockPContext) setState(state state.State) {
|
||||
mpc.state = state
|
||||
}
|
||||
|
||||
func (mpc *mockPContext) tmState() state.State {
|
||||
return mpc.state
|
||||
}
|
||||
@@ -1,306 +0,0 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
tmState "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability.
|
||||
type pcBlock struct {
|
||||
pid string
|
||||
height int64
|
||||
}
|
||||
|
||||
// params is a test structure used to create processor state.
|
||||
type params struct {
|
||||
height int64
|
||||
items []pcBlock
|
||||
blocksSynced int
|
||||
verBL []int64
|
||||
appBL []int64
|
||||
draining bool
|
||||
}
|
||||
|
||||
// makePcBlock makes an empty block.
|
||||
func makePcBlock(height int64) *types.Block {
|
||||
return &types.Block{Header: types.Header{Height: height}}
|
||||
}
|
||||
|
||||
// makeState takes test parameters and creates a specific processor state.
|
||||
func makeState(p *params) *pcState {
|
||||
var (
|
||||
tmState = tmState.State{LastBlockHeight: p.height}
|
||||
context = newMockProcessorContext(tmState, p.verBL, p.appBL)
|
||||
)
|
||||
state := newPcState(context)
|
||||
|
||||
for _, item := range p.items {
|
||||
state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
|
||||
}
|
||||
|
||||
state.blocksSynced = p.blocksSynced
|
||||
state.draining = p.draining
|
||||
return state
|
||||
}
|
||||
|
||||
func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
|
||||
return scBlockReceived{
|
||||
peerID: peerID,
|
||||
block: makePcBlock(height),
|
||||
}
|
||||
}
|
||||
|
||||
type pcFsmMakeStateValues struct {
|
||||
currentState *params
|
||||
event Event
|
||||
wantState *params
|
||||
wantNextEvent Event
|
||||
wantErr error
|
||||
wantPanic bool
|
||||
}
|
||||
|
||||
type testFields struct {
|
||||
name string
|
||||
steps []pcFsmMakeStateValues
|
||||
}
|
||||
|
||||
func executeProcessorTests(t *testing.T, tests []testFields) {
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
var state *pcState
|
||||
for _, step := range tt.steps {
|
||||
defer func() {
|
||||
r := recover()
|
||||
if (r != nil) != step.wantPanic {
|
||||
t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic)
|
||||
}
|
||||
}()
|
||||
|
||||
// The first step must always initialize the state from currentState.
|
||||
if step.currentState != nil {
|
||||
state = makeState(step.currentState)
|
||||
}
|
||||
if state == nil {
|
||||
panic("Bad (initial?) step")
|
||||
}
|
||||
|
||||
nextEvent, err := state.handle(step.event)
|
||||
t.Log(state)
|
||||
assert.Equal(t, step.wantErr, err)
|
||||
assert.Equal(t, makeState(step.wantState), state)
|
||||
assert.Equal(t, step.wantNextEvent, nextEvent)
|
||||
// The next step may use the wantState as its currentState.
|
||||
state = makeState(step.wantState)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRProcessPeerError(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "error for existing peer",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
|
||||
event: scPeerError{peerID: "P2"},
|
||||
wantState: ¶ms{items: []pcBlock{{"P1", 1}}},
|
||||
wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "error for unknown peer",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
|
||||
event: scPeerError{peerID: "P3"},
|
||||
wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
|
||||
wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeProcessorTests(t, tests)
|
||||
}
|
||||
|
||||
func TestPcBlockResponse(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "add one block",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{}, event: mBlockResponse("P1", 1),
|
||||
wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
name: "add two blocks",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{}, event: mBlockResponse("P1", 3),
|
||||
wantState: ¶ms{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp,
|
||||
},
|
||||
{ // use previous wantState as currentState,
|
||||
event: mBlockResponse("P1", 4),
|
||||
wantState: ¶ms{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeProcessorTests(t, tests)
|
||||
}
|
||||
|
||||
func TestRProcessBlockSuccess(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "noop - no blocks over current height",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{}, event: rProcessBlock{},
|
||||
wantState: ¶ms{}, wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "noop - high new blocks",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{},
|
||||
wantState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "blocks H+1 and H+2 present",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{},
|
||||
wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1},
|
||||
wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "blocks H+1 and H+2 present after draining",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{ // some contiguous blocks - on stop check draining is set
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}},
|
||||
event: scFinishedEv{},
|
||||
wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true},
|
||||
wantNextEvent: noOp,
|
||||
},
|
||||
{
|
||||
event: rProcessBlock{},
|
||||
wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
|
||||
wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
|
||||
},
|
||||
{ // finish when H+1 or/and H+2 are missing
|
||||
event: rProcessBlock{},
|
||||
wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
|
||||
wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 1}, blocksSynced: 1},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeProcessorTests(t, tests)
|
||||
}
|
||||
|
||||
func TestRProcessBlockFailures(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "blocks H+1 and H+2 present from different peers - H+1 verification fails ",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{},
|
||||
wantState: ¶ms{items: []pcBlock{}, verBL: []int64{1}},
|
||||
wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails ",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{},
|
||||
wantState: ¶ms{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "blocks H+1 and H+2 present from same peers - H+1 verification fails ",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}},
|
||||
verBL: []int64{1}}, event: rProcessBlock{},
|
||||
wantState: ¶ms{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}},
|
||||
wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails ",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}},
|
||||
event: rProcessBlock{},
|
||||
wantState: ¶ms{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeProcessorTests(t, tests)
|
||||
}
|
||||
|
||||
func TestScFinishedEv(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "no blocks",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{},
|
||||
wantState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100},
|
||||
wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "maxHeight+1 block present",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{height: 100, items: []pcBlock{
|
||||
{"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{},
|
||||
wantState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100},
|
||||
wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "more blocks present",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{height: 100, items: []pcBlock{
|
||||
{"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{},
|
||||
wantState: ¶ms{height: 100, items: []pcBlock{
|
||||
{"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true},
|
||||
wantNextEvent: noOp,
|
||||
wantErr: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeProcessorTests(t, tests)
|
||||
}
|
||||
@@ -1,590 +0,0 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// chBufferSize is the buffer size of all event channels.
|
||||
chBufferSize int = 1000
|
||||
)
|
||||
|
||||
type blockStore interface {
|
||||
LoadBlock(height int64) *types.Block
|
||||
SaveBlock(*types.Block, *types.PartSet, *types.Commit)
|
||||
Base() int64
|
||||
Height() int64
|
||||
}
|
||||
|
||||
// BlockchainReactor handles the fast sync protocol.
|
||||
type BlockchainReactor struct {
|
||||
p2p.BaseReactor
|
||||
|
||||
fastSync bool // if true, enable fast sync on start
|
||||
stateSynced bool // set to true when SwitchToFastSync is called by state sync
|
||||
scheduler *Routine
|
||||
processor *Routine
|
||||
logger log.Logger
|
||||
|
||||
mtx tmsync.RWMutex
|
||||
maxPeerHeight int64
|
||||
syncHeight int64
|
||||
events chan Event // non-nil during a fast sync
|
||||
|
||||
reporter behaviour.Reporter
|
||||
io iIO
|
||||
store blockStore
|
||||
}
|
||||
|
||||
type blockApplier interface {
|
||||
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
|
||||
}
|
||||
|
||||
// XXX: unify naming in this package around tmState
|
||||
func newReactor(state state.State, store blockStore, reporter behaviour.Reporter,
|
||||
blockApplier blockApplier, fastSync bool) *BlockchainReactor {
|
||||
initHeight := state.LastBlockHeight + 1
|
||||
if initHeight == 1 {
|
||||
initHeight = state.InitialHeight
|
||||
}
|
||||
scheduler := newScheduler(initHeight, time.Now())
|
||||
pContext := newProcessorContext(store, blockApplier, state)
|
||||
// TODO: Fix naming to just newProcessor
|
||||
// newPcState requires a processorContext
|
||||
processor := newPcState(pContext)
|
||||
|
||||
return &BlockchainReactor{
|
||||
scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
|
||||
processor: newRoutine("processor", processor.handle, chBufferSize),
|
||||
store: store,
|
||||
reporter: reporter,
|
||||
logger: log.NewNopLogger(),
|
||||
fastSync: fastSync,
|
||||
}
|
||||
}
|
||||
|
||||
// NewBlockchainReactor creates a new reactor instance.
|
||||
func NewBlockchainReactor(
|
||||
state state.State,
|
||||
blockApplier blockApplier,
|
||||
store blockStore,
|
||||
fastSync bool) *BlockchainReactor {
|
||||
reporter := behaviour.NewMockReporter()
|
||||
return newReactor(state, store, reporter, blockApplier, fastSync)
|
||||
}
|
||||
|
||||
// SetSwitch implements Reactor interface.
|
||||
func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) {
|
||||
r.Switch = sw
|
||||
if sw != nil {
|
||||
r.io = newSwitchIo(sw)
|
||||
} else {
|
||||
r.io = nil
|
||||
}
|
||||
}
|
||||
|
||||
func (r *BlockchainReactor) setMaxPeerHeight(height int64) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
if height > r.maxPeerHeight {
|
||||
r.maxPeerHeight = height
|
||||
}
|
||||
}
|
||||
|
||||
func (r *BlockchainReactor) setSyncHeight(height int64) {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
r.syncHeight = height
|
||||
}
|
||||
|
||||
// SyncHeight returns the height to which the BlockchainReactor has synced.
|
||||
func (r *BlockchainReactor) SyncHeight() int64 {
|
||||
r.mtx.RLock()
|
||||
defer r.mtx.RUnlock()
|
||||
return r.syncHeight
|
||||
}
|
||||
|
||||
// SetLogger sets the logger of the reactor.
|
||||
func (r *BlockchainReactor) SetLogger(logger log.Logger) {
|
||||
r.logger = logger
|
||||
r.scheduler.setLogger(logger)
|
||||
r.processor.setLogger(logger)
|
||||
}
|
||||
|
||||
// Start implements cmn.Service interface
|
||||
func (r *BlockchainReactor) Start() error {
|
||||
r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch)
|
||||
if r.fastSync {
|
||||
err := r.startSync(nil)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to start fast sync: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// startSync begins a fast sync, signalled by r.events being non-nil. If state is non-nil,
|
||||
// the scheduler and processor are updated with this state on startup.
|
||||
func (r *BlockchainReactor) startSync(state *state.State) error {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
if r.events != nil {
|
||||
return errors.New("fast sync already in progress")
|
||||
}
|
||||
r.events = make(chan Event, chBufferSize)
|
||||
go r.scheduler.start()
|
||||
go r.processor.start()
|
||||
if state != nil {
|
||||
<-r.scheduler.ready()
|
||||
<-r.processor.ready()
|
||||
r.scheduler.send(bcResetState{state: *state})
|
||||
r.processor.send(bcResetState{state: *state})
|
||||
}
|
||||
go r.demux(r.events)
|
||||
return nil
|
||||
}
|
||||
|
||||
// endSync ends a fast sync
|
||||
func (r *BlockchainReactor) endSync() {
|
||||
r.mtx.Lock()
|
||||
defer r.mtx.Unlock()
|
||||
if r.events != nil {
|
||||
close(r.events)
|
||||
}
|
||||
r.events = nil
|
||||
r.scheduler.stop()
|
||||
r.processor.stop()
|
||||
}
|
||||
|
||||
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
|
||||
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
|
||||
r.stateSynced = true
|
||||
state = state.Copy()
|
||||
return r.startSync(&state)
|
||||
}
|
||||
|
||||
// reactor generated ticker events:
// ticker for cleaning peers
type rTryPrunePeer struct {
    priorityHigh
    time time.Time
}

func (e rTryPrunePeer) String() string {
    return fmt.Sprintf("rTryPrunePeer{%v}", e.time)
}

// ticker event for scheduling block requests
type rTrySchedule struct {
    priorityHigh
    time time.Time
}

func (e rTrySchedule) String() string {
    return fmt.Sprintf("rTrySchedule{%v}", e.time)
}

// ticker for block processing
type rProcessBlock struct {
    priorityNormal
}

func (e rProcessBlock) String() string {
    return "rProcessBlock"
}

// reactor generated events based on blockchain related messages from peers:
// blockResponse message received from a peer
type bcBlockResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.NodeID
    size   int64
    block  *types.Block
}

func (resp bcBlockResponse) String() string {
    return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}",
        resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time)
}

// noBlockResponse message received from a peer
type bcNoBlockResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.NodeID
    height int64
}

func (resp bcNoBlockResponse) String() string {
    return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}",
        resp.peerID, resp.height, resp.time)
}

// statusResponse message received from a peer
type bcStatusResponse struct {
    priorityNormal
    time   time.Time
    peerID p2p.NodeID
    base   int64
    height int64
}

func (resp bcStatusResponse) String() string {
    return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}",
        resp.peerID, resp.height, resp.base, resp.time)
}

// new peer is connected
type bcAddNewPeer struct {
    priorityNormal
    peerID p2p.NodeID
}

func (resp bcAddNewPeer) String() string {
    return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID)
}

// existing peer is removed
type bcRemovePeer struct {
    priorityHigh
    peerID p2p.NodeID
    reason interface{}
}

func (resp bcRemovePeer) String() string {
    return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason)
}

// resets the scheduler and processor state, e.g. following a switch from state syncing
type bcResetState struct {
    priorityHigh
    state state.State
}

func (e bcResetState) String() string {
    return fmt.Sprintf("bcResetState{%v}", e.state)
}

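// The embedded priorityHigh / priorityNormal fields above are zero-size markers that give
// every event a common priority; their definitions live outside this diff. The pattern is
// roughly the following (a sketch; the method name and values are illustrative):
//
//    type priorityNormal struct{}
//    func (priorityNormal) Priority() int { return 2 }
//
//    type priorityHigh struct{}
//    func (priorityHigh) Priority() int { return 3 }
//
// Embedding one of these structs promotes its Priority method onto the event type, so each
// event carries a priority without re-implementing the method.
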
// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
    var lastRate = 0.0
    var lastHundred = time.Now()

    var (
        processBlockFreq = 20 * time.Millisecond
        doProcessBlockCh = make(chan struct{}, 1)
        doProcessBlockTk = time.NewTicker(processBlockFreq)
    )
    defer doProcessBlockTk.Stop()

    var (
        prunePeerFreq = 1 * time.Second
        doPrunePeerCh = make(chan struct{}, 1)
        doPrunePeerTk = time.NewTicker(prunePeerFreq)
    )
    defer doPrunePeerTk.Stop()

    var (
        scheduleFreq = 20 * time.Millisecond
        doScheduleCh = make(chan struct{}, 1)
        doScheduleTk = time.NewTicker(scheduleFreq)
    )
    defer doScheduleTk.Stop()

    var (
        statusFreq = 10 * time.Second
        doStatusCh = make(chan struct{}, 1)
        doStatusTk = time.NewTicker(statusFreq)
    )
    defer doStatusTk.Stop()
    doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers

    // Memoize the scSchedulerFail error to avoid printing it every scheduleFreq.
    var scSchedulerFailErr error

    // XXX: Extract timers to make testing atemporal
    for {
        select {
        // Pacers: send at most per frequency but don't saturate
        case <-doProcessBlockTk.C:
            select {
            case doProcessBlockCh <- struct{}{}:
            default:
            }
        case <-doPrunePeerTk.C:
            select {
            case doPrunePeerCh <- struct{}{}:
            default:
            }
        case <-doScheduleTk.C:
            select {
            case doScheduleCh <- struct{}{}:
            default:
            }
        case <-doStatusTk.C:
            select {
            case doStatusCh <- struct{}{}:
            default:
            }

        // Tickers: perform tasks periodically
        case <-doScheduleCh:
            r.scheduler.send(rTrySchedule{time: time.Now()})
        case <-doPrunePeerCh:
            r.scheduler.send(rTryPrunePeer{time: time.Now()})
        case <-doProcessBlockCh:
            r.processor.send(rProcessBlock{})
        case <-doStatusCh:
            if err := r.io.broadcastStatusRequest(); err != nil {
                r.logger.Error("Error broadcasting status request", "err", err)
            }

        // Events from peers. Closing the channel signals event loop termination.
        case event, ok := <-events:
            if !ok {
                r.logger.Info("Stopping event processing")
                return
            }
            switch event := event.(type) {
            case bcStatusResponse:
                r.setMaxPeerHeight(event.height)
                r.scheduler.send(event)
            case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse:
                r.scheduler.send(event)
            default:
                r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event))
            }

        // Incremental events from scheduler
        case event := <-r.scheduler.next():
            switch event := event.(type) {
            case scBlockReceived:
                r.processor.send(event)
            case scPeerError:
                r.processor.send(event)
                if err := r.reporter.Report(behaviour.BadMessage(event.peerID, "scPeerError")); err != nil {
                    r.logger.Error("Error reporting peer", "err", err)
                }
            case scBlockRequest:
                peer := r.Switch.Peers().Get(event.peerID)
                if peer == nil {
                    r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID)
                    continue
                }
                if err := r.io.sendBlockRequest(peer, event.height); err != nil {
                    r.logger.Error("Error sending block request", "err", err)
                }
            case scFinishedEv:
                r.processor.send(event)
                r.scheduler.stop()
            case scSchedulerFail:
                if scSchedulerFailErr != event.reason {
                    r.logger.Error("Scheduler failure", "err", event.reason.Error())
                    scSchedulerFailErr = event.reason
                }
            case scPeersPruned:
                // Remove peers from the processor.
                for _, peerID := range event.peers {
                    r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")})
                }
                r.logger.Debug("Pruned peers", "count", len(event.peers))
            case noOpEvent:
            default:
                r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event))
            }

        // Incremental events from processor
        case event := <-r.processor.next():
            switch event := event.(type) {
            case pcBlockProcessed:
                r.setSyncHeight(event.height)
                if r.syncHeight%100 == 0 {
                    // Exponentially weighted moving average of the rate over the last 100 blocks.
                    lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
                    r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
                        "max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
                    lastHundred = time.Now()
                }
                r.scheduler.send(event)
            case pcBlockVerificationFailure:
                r.scheduler.send(event)
            case pcFinished:
                r.logger.Info("Fast sync complete, switching to consensus")
                if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
                    r.logger.Error("Failed to switch to consensus reactor")
                }
                r.endSync()
                return
            case noOpEvent:
            default:
                r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event))
            }

        // Terminal event from scheduler
        case err := <-r.scheduler.final():
            switch err {
            case nil:
                r.logger.Info("Scheduler stopped")
            default:
                r.logger.Error("Scheduler aborted with error", "err", err)
            }

        // Terminal event from processor
        case err := <-r.processor.final():
            switch err {
            case nil:
                r.logger.Info("Processor stopped")
            default:
                r.logger.Error("Processor aborted with error", "err", err)
            }
        }
    }
}

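// The pacer construction used in demux above (a ticker feeding a 1-buffered channel via a
// non-blocking send) caps how often a task runs without letting missed ticks pile up. In
// isolation the pattern looks like this (a self-contained sketch, not code from this
// changeset; process() stands in for the periodic task):
//
//    tk := time.NewTicker(20 * time.Millisecond)
//    defer tk.Stop()
//    doWork := make(chan struct{}, 1)
//    for {
//        select {
//        case <-tk.C:
//            select {
//            case doWork <- struct{}{}: // at most one request pending
//            default: // a request is already pending; drop this tick
//            }
//        case <-doWork:
//            process()
//        }
//    }
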
// Stop implements cmn.Service interface.
func (r *BlockchainReactor) Stop() error {
    r.logger.Info("reactor stopping")
    r.endSync()
    r.logger.Info("reactor stopped")
    return nil
}

// Receive implements Reactor by handling different message types.
// XXX: do not call any methods that can block or incur heavy processing.
// https://github.com/tendermint/tendermint/issues/2888
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
    logger := r.logger.With("src", src.ID(), "chID", chID)

    msgProto := new(bcproto.Message)

    if err := proto.Unmarshal(msgBytes, msgProto); err != nil {
        logger.Error("error decoding message", "err", err)
        _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
        return
    }

    if err := msgProto.Validate(); err != nil {
        logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err)
        _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
        return
    }

    logger.Debug("received", "msg", msgProto)

    switch msg := msgProto.Sum.(type) {
    case *bcproto.Message_StatusRequest:
        if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil {
            logger.Error("Could not send status message to src peer", "err", err)
        }

    case *bcproto.Message_BlockRequest:
        block := r.store.LoadBlock(msg.BlockRequest.Height)
        if block != nil {
            if err := r.io.sendBlockToPeer(block, src); err != nil {
                logger.Error("Could not send block message to src peer", "err", err)
            }
        } else {
            logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height)
            if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil {
                logger.Error("Couldn't send block not found msg", "err", err)
            }
        }

    case *bcproto.Message_StatusResponse:
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcStatusResponse{
                peerID: src.ID(),
                base:   msg.StatusResponse.Base,
                height: msg.StatusResponse.Height,
            }
        }
        r.mtx.RUnlock()

    case *bcproto.Message_BlockResponse:
        bi, err := types.BlockFromProto(msg.BlockResponse.Block)
        if err != nil {
            logger.Error("error converting block from protobuf", "err", err)
            _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
            return
        }
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcBlockResponse{
                peerID: src.ID(),
                block:  bi,
                size:   int64(len(msgBytes)),
                time:   time.Now(),
            }
        }
        r.mtx.RUnlock()

    case *bcproto.Message_NoBlockResponse:
        r.mtx.RLock()
        if r.events != nil {
            r.events <- bcNoBlockResponse{
                peerID: src.ID(),
                height: msg.NoBlockResponse.Height,
                time:   time.Now(),
            }
        }
        r.mtx.RUnlock()
    }
}

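// Receive above, and AddPeer/RemovePeer below, all guard sends on r.events the same way:
// take the read lock, check that the channel is non-nil, and only then send. endSync
// closes and nils the channel under the write lock, so a guarded send cannot race with
// the close.
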
// AddPeer implements Reactor interface
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
    err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer)
    if err != nil {
        r.logger.Error("could not send our status to the new peer", "peer", peer.ID(), "err", err)
    }

    err = r.io.sendStatusRequest(peer)
    if err != nil {
        r.logger.Error("could not send status request to the new peer", "peer", peer.ID(), "err", err)
    }

    r.mtx.RLock()
    defer r.mtx.RUnlock()
    if r.events != nil {
        r.events <- bcAddNewPeer{peerID: peer.ID()}
    }
}

// RemovePeer implements Reactor interface.
func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
    r.mtx.RLock()
    defer r.mtx.RUnlock()
    if r.events != nil {
        r.events <- bcRemovePeer{
            peerID: peer.ID(),
            reason: reason,
        }
    }
}

// GetChannels implements Reactor
func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
    return []*p2p.ChannelDescriptor{
        {
            ID:                  BlockchainChannel,
            Priority:            5,
            SendQueueCapacity:   2000,
            RecvBufferCapacity:  50 * 4096,
            RecvMessageCapacity: bc.MaxMsgSize,
        },
    }
}