Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-23 21:22:06 +00:00)
Compare commits
672 Commits
(Commit range: fc031d980b, oldest, through af99236105, newest. The Author/SHA1/Date table survives only as abbreviated SHA1s in this mirror; author and date cells were empty.)
@@ -1,246 +1,126 @@
version: 2
version: 2.1

defaults: &defaults
working_directory: /go/src/github.com/tendermint/tendermint
docker:
- image: circleci/golang:1.11.4
environment:
GOBIN: /tmp/workspace/bin
executors:
golang:
docker:
- image: tendermintdev/docker-tendermint-build
working_directory: /go/src/github.com/tendermint/tendermint
environment:
GOBIN: /tmp/bin
release:
machine: true
docs:
docker:
- image: tendermintdev/docker-website-deployment
environment:
AWS_REGION: us-east-1

docs_update_config: &docs_update_config
working_directory: ~/repo
docker:
- image: tendermint/docs_deployment
environment:
AWS_REGION: us-east-1
commands:
run_test:
parameters:
script_path:
type: string
steps:
- attach_workspace:
at: /tmp/bin
- restore_cache:
name: "Restore source code cache"
keys:
- go-src-v1-{{ .Revision }}
- checkout
- restore_cache:
name: "Restore go modules cache"
keys:
- go-mod-v1-{{ checksum "go.sum" }}
- run:
name: "Running test"
command: |
bash << parameters.script_path >>

jobs:
setup_dependencies:
<<: *defaults
executor: golang
steps:
- run: mkdir -p /tmp/workspace/bin
- run: mkdir -p /tmp/workspace/profiles
- checkout
- restore_cache:
name: "Restore go modules cache"
keys:
- v3-pkg-cache
- go-mod-v1-{{ checksum "go.sum" }}
- run:
command: |
mkdir -p /tmp/bin
- run:
name: Cache go modules
command: make go-mod-cache
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
command: make tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run:
name: binaries
command: |
export PATH="$GOBIN:$PATH"
make install install_abci
- persist_to_workspace:
root: /tmp/workspace
paths:
- bin
- profiles
name: "Build binaries"
command: make install install_abci
- save_cache:
key: v3-pkg-cache
name: "Save go modules cache"
key: go-mod-v1-{{ checksum "go.sum" }}
paths:
- /go/pkg
# - save_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
# paths:
# - /go/src/github.com/tendermint/tendermint

build_slate:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: slate docs
command: |
set -ex
export PATH="$GOBIN:$PATH"
make build-slate

lint:
<<: *defaults
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
make get_dev_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: metalinter
command: |
set -ex
export PATH="$GOBIN:$PATH"
make metalinter
- run:
name: check_dep
command: |
set -ex
export PATH="$GOBIN:$PATH"
make check_dep
- "/go/pkg/mod"
- save_cache:
name: "Save source code cache"
key: go-src-v1-{{ .Revision }}
paths:
- ".git"
- persist_to_workspace:
root: "/tmp/bin"
paths:
- "."

test_abci_apps:
<<: *defaults
executor: golang
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run_test:
script_path: abci/tests/test_app/test.sh
- run:
name: Run abci apps tests
command: |
export PATH="$GOBIN:$PATH"
bash abci/tests/test_app/test.sh

# if this test fails, fix it and update the docs at:
# https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.md
# if this test fails, fix it and update the docs at:
# https://github.com/tendermint/tendermint/blob/master/docs/abci-cli.md
test_abci_cli:
<<: *defaults
executor: golang
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run abci-cli tests
command: |
export PATH="$GOBIN:$PATH"
bash abci/tests/test_cli/test.sh
- run_test:
script_path: abci/tests/test_cli/test.sh

test_apps:
<<: *defaults
executor: golang
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run_test:
script_path: test/app/test.sh
- run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
- run:
name: Run tests
command: bash test/app/test.sh
test_persistence:
executor: golang
steps:
- run_test:
script_path: test/persist/test_failure_indices.sh

test_cover:
<<: *defaults
executor: golang
parallelism: 4
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
name: "Restore source code cache"
keys:
- go-src-v1-{{ .Revision }}
- checkout
- restore_cache:
name: "Restore go module cache"
keys:
- go-mod-v2-{{ checksum "go.sum" }}
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run: mkdir -p /tmp/logs
- run:
name: Run tests
name: "Run tests"
command: |
export VERSION="$(git describe --tags --long | sed 's/v\(.*\)/\1/')"
export GO111MODULE=on
mkdir -p /tmp/logs /tmp/workspace/profiles
for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
id=$(basename "$pkg")
GOCACHE=off go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
go test -v -timeout 5m -mod=readonly -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
done
- persist_to_workspace:
root: /tmp/workspace
@@ -249,54 +129,25 @@ jobs:
- store_artifacts:
path: /tmp/logs

test_persistence:
<<: *defaults
localnet:
working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
machine:
image: circleci/classic:latest
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace/
GOOS: linux
GOARCH: amd64
parallelism: 1
steps:
- attach_workspace:
at: /tmp/workspace
- restore_cache:
key: v3-pkg-cache
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- checkout
- run:
name: tools
name: run localnet and exit on failure
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- run:
name: Run tests
command: bash test/persist/test_failure_indices.sh

localnet:
working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
machine:
image: circleci/classic:latest
environment:
GOBIN: /home/circleci/.go_workspace/bin
GOPATH: /home/circleci/.go_workspace/
GOOS: linux
GOARCH: amd64
parallelism: 1
steps:
- checkout
- run:
name: run localnet and exit on failure
command: |
set -x
make get_tools
make get_vendor_deps
make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost
set -x
docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
make localnet-start &
./scripts/localnet-blocks-test.sh 40 5 10 localhost

test_p2p:
environment:
@@ -313,31 +164,22 @@ jobs:
path: /home/circleci/project/test/p2p/logs

upload_coverage:
<<: *defaults
executor: golang
steps:
- attach_workspace:
at: /tmp/workspace
# - restore_cache:
# key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
- restore_cache:
name: "Restore source code cache"
keys:
- go-src-v1-{{ .Revision }}
- checkout
- run:
name: tools
command: |
export PATH="$GOBIN:$PATH"
make get_tools
- run:
name: dependencies
command: |
export PATH="$GOBIN:$PATH"
make get_vendor_deps
- run: mkdir -p $GOPATH/src/github.com/tendermint
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
- restore_cache:
name: "Restore go module cache"
keys:
- go-mod-v2-{{ checksum "go.sum" }}
- run:
name: gather
command: |
set -ex

echo "mode: atomic" > coverage.txt
for prof in $(ls /tmp/workspace/profiles/); do
tail -n +2 /tmp/workspace/profiles/"$prof" >> coverage.txt
@@ -347,28 +189,205 @@ jobs:
command: bash .circleci/codecov.sh -f coverage.txt

deploy_docs:
<<: *docs_update_config
executor: docs
steps:
- checkout
- run:
name: Trigger website build
name: "Build docs"
command: make build-docs
- run:
name: "Sync to S3"
command: make sync-docs

prepare_build:
executor: golang
steps:
- restore_cache:
name: "Restore source code cache"
keys:
- go-src-v1-{{ .Revision }}
- checkout
- run:
name: Get next release number
command: |
chamber exec tendermint -- start_website_build
export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
echo "Last tag: ${LAST_TAG}"
if [ -z "${LAST_TAG}" ]; then
export LAST_TAG="${CIRCLE_BRANCH}"
echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
fi
export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
echo "Next tag: ${NEXT_TAG}"
echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
- run:
name: Build dependencies
command: make tools
- persist_to_workspace:
root: .
paths:
- "release-version.source"
- save_cache:
key: v2-release-deps-{{ checksum "go.sum" }}
paths:
- "/go/pkg/mod"

build_artifacts:
executor: golang
parallelism: 4
steps:
- restore_cache:
name: "Restore source code cache"
keys:
- go-src-v1-{{ .Revision }}
- checkout
- restore_cache:
name: "Restore release dependencies cache"
keys:
- v2-release-deps-{{ checksum "go.sum" }}
- attach_workspace:
at: /tmp/workspace
- run:
name: Build artifact
command: |
# Setting CIRCLE_TAG because we do not tag the release ourselves.
source /tmp/workspace/release-version.source
if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
- persist_to_workspace:
root: build
paths:
- "*.zip"
- "tendermint_linux_amd64"

release_artifacts:
executor: golang
steps:
- restore_cache:
name: "Restore source code cache"
keys:
- go-src-v1-{{ .Revision }}
- checkout
- attach_workspace:
at: /tmp/workspace
- run:
name: "Deploy to GitHub"
command: |
# Setting CIRCLE_TAG because we do not tag the release ourselves.
source /tmp/workspace/release-version.source
echo "---"
ls -la /tmp/workspace/*.zip
echo "---"
python -u scripts/release_management/sha-files.py
echo "---"
cat /tmp/workspace/SHA256SUMS
echo "---"
export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
echo "Release ID: ${RELEASE_ID}"
#Todo: Parallelize uploads
export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"

release_docker:
machine:
image: ubuntu-1604:201903-01
steps:
- checkout
- attach_workspace:
at: /tmp/workspace
- run:
name: "Deploy to Docker Hub"
command: |
# Setting CIRCLE_TAG because we do not tag the release ourselves.
source /tmp/workspace/release-version.source
cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}"
docker push "tendermint/tendermint"
docker logout

reproducible_builds:
executor: golang
steps:
- attach_workspace:
at: /tmp/workspace
- checkout
- setup_remote_docker:
docker_layer_caching: true
- run:
name: Build tendermint
no_output_timeout: 20m
command: |
sudo apt-get install -y ruby
bash -x ./scripts/gitian-build.sh all
for os in darwin linux windows; do
cp gitian-build-${os}/result/tendermint-${os}-res.yml .
cp gitian-build-${os}/build/out/tendermint-*.tar.gz .
rm -rf gitian-build-${os}/
done
- store_artifacts:
path: /go/src/github.com/tendermint/tendermint/tendermint-darwin-res.yml
- store_artifacts:
path: /go/src/github.com/tendermint/tendermint/tendermint-linux-res.yml
- store_artifacts:
path: /go/src/github.com/tendermint/tendermint/tendermint-windows-res.yml
- store_artifacts:
path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz

# # Test RPC implementation against the swagger documented specs
# contract_tests:
# working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
# machine:
# image: circleci/classic:latest
# environment:
# GOBIN: /home/circleci/.go_workspace/bin
# GOPATH: /home/circleci/.go_workspace/
# GOOS: linux
# GOARCH: amd64
# parallelism: 1
# steps:
# - checkout
# - run:
# name: Test RPC endpoints against swagger documentation
# command: |
# set -x
# export PATH=~/.local/bin:$PATH
#
# # install node and dredd
# ./scripts/get_nodejs.sh
#
# # build the binaries with a proper version of Go
# docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks
#
# # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use
# go get github.com/snikch/goodman/cmd/goodman
# make contract-tests

workflows:
version: 2
test-suite:
jobs:
- deploy_docs:
context: tendermint-docs
filters:
branches:
only:
- master
- develop
tags:
only:
- /^v.*/
- deploy_docs:
context: tendermint-docs-staging
filters:
branches:
only:
- docs-theme-latest
- setup_dependencies
- lint:
requires:
- setup_dependencies
- test_abci_apps:
requires:
- setup_dependencies
@@ -391,3 +410,36 @@ workflows:
- upload_coverage:
requires:
- test_cover
- reproducible_builds:
filters:
branches:
only:
- master
- /v[0-9]+\.[0-9]+/
# - contract_tests:
# requires:
# - setup_dependencies

release:
jobs:
- prepare_build
- build_artifacts:
requires:
- prepare_build
- release_artifacts:
requires:
- prepare_build
- build_artifacts
filters:
branches:
only:
- /v[0-9]+\.[0-9]+/
- release_docker:
requires:
- prepare_build
- build_artifacts
filters:
branches:
only:
- /v[0-9]+\.[0-9]+/
- master
8  .github/CODEOWNERS (vendored)
@@ -1,7 +1,9 @@
# CODEOWNERS: https://help.github.com/articles/about-codeowners/

# Everything goes through Bucky, Anton, Alex. For now.
* @ebuchman @melekes @xla
# Everything goes through Bucky, Anton, Tess. For now.
* @ebuchman @melekes @tessr

# Precious documentation
/docs/ @zramsay
/docs/README.md @zramsay
/docs/DOCS_README.md @zramsay
/docs/.vuepress/ @zramsay
9  .github/PULL_REQUEST_TEMPLATE.md (vendored)
@@ -1,5 +1,12 @@
<!-- Thanks for filing a PR! Before hitting the button, please check the following items.-->
<!--

Thanks for filing a PR! Before hitting the button, please check the following items.
Please note that every non-trivial PR must reference an issue that explains the
changes in the PR.

-->

* [ ] Referenced an issue explaining the need for the change
* [ ] Updated all relevant documentation in docs
* [ ] Updated all code comments where relevant
* [ ] Wrote tests
47  .github/stale.yml (vendored, new file)
@@ -0,0 +1,47 @@
# Configuration for probot-stale - https://github.com/probot/stale

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 60

# Number of days of inactivity before an Issue or Pull Request with the stale label is closed.
# Set to false to disable. If disabled, issues still need to be closed manually, but will remain marked as stale.
daysUntilClose: 9

# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
onlyLabels: []

# Issues or Pull Requests with these labels will never be considered stale. Set to `[]` to disable
exemptLabels:
  - major-release

# Set to true to ignore issues in a project (defaults to false)
exemptProjects: true

# Set to true to ignore issues in a milestone (defaults to false)
exemptMilestones: true

# Set to true to ignore issues with an assignee (defaults to false)
exemptAssignees: false

# Label to use when marking as stale
staleLabel: stale

# Comment to post when marking as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you
  for your contributions.

# Limit the number of actions per hour, from 1-30. Default is 30
limitPerRun: 30

# Limit to only `issues` or `pulls`
only: pulls

# Optionally, specify configuration settings that are specific to just 'issues' or 'pulls':
pulls:
  daysUntilStale: 30
  markComment: >
    This pull request has been automatically marked as stale because it has not had
    recent activity. It will be closed if no further activity occurs. Thank you
    for your contributions.
4  .gitignore (vendored)
@@ -15,6 +15,7 @@ test/logs
coverage.txt
docs/_build
docs/dist
docs/.vuepress/dist
*.log
abci-cli
docs/node_modules/
@@ -35,6 +36,7 @@ shunit2
addrbook.json

*/vendor
.vendor-new/
*/.glide
.terraform
terraform.tfstate
@@ -42,3 +44,5 @@ terraform.tfstate.backup
terraform.tfstate.d

.vscode

profile\.out
70  .golangci.yml (new file)
@@ -0,0 +1,70 @@
linters:
  enable:
    - bodyclose
    - deadcode
    - depguard
    - dogsled
    - dupl
    # - errcheck
    # - funlen
    # - gochecknoglobals
    # - gochecknoinits
    - goconst
    - gocritic
    # - gocyclo
    # - godox
    - gofmt
    - goimports
    - golint
    - gosec
    - gosimple
    - govet
    - ineffassign
    - interfacer
    - lll
    - misspell
    - maligned
    - nakedret
    - prealloc
    - scopelint
    - staticcheck
    - structcheck
    - stylecheck
    - typecheck
    - unconvert
    # - unparam
    - unused
    - varcheck
    # - whitespace
    # - wsl
    # - gocognit
  disable:
    - errcheck

issues:
  exclude-rules:
    - linters:
        - lll
      source: "https://"

linters-settings:
  dogsled:
    max-blank-identifiers: 3
  maligned:
    suggest-new: true
  # govet:
  #   check-shadowing: true
  golint:
    min-confidence: 0
  # gocyclo:
  #   min-complexity: 10
  # misspell:
  #   locale: US
  # gocritic:
  #   enabled-tags:
  #     - performance
  #     - style
  #     - experimental
  #   disabled-checks:
  #     - wrapperFunc
  #     - commentFormatting # https://github.com/go-critic/go-critic/issues/755
1228  CHANGELOG.md (file diff suppressed because it is too large)
@@ -1,40 +1,24 @@
## v0.28.0
## v0.33.1

*TBD*
\*\*

Special thanks to external contributors on this release:

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config
  - [cli] Removed `node` `--proxy_app=dummy` option. Use `kvstore` (`persistent_kvstore`) instead.
  - [cli] Renamed `node` `--proxy_app=nilapp` to `--proxy_app=noop`.
  - [config] \#2992 `allow_duplicate_ip` is now set to false
  - [privval] \#2926 split up `PubKeyMsg` into `PubKeyRequest` and `PubKeyResponse` to be consistent with other message types
  - [privval] \#2923 listen for unix socket connections instead of dialing them
- CLI/RPC/Config

* Apps
- Apps

* Go API
  - [types] \#2926 memoize consensus public key on initialization of remote signer and return the memoized key on
    `PrivValidator.GetPubKey()` instead of requesting it again
  - [types] \#2981 Remove `PrivValidator.GetAddress()`

* Blockchain Protocol

* P2P Protocol
- Go API

### FEATURES:
- [privval] \#1181 Split immutable and mutable parts of `priv_validator.json`

### IMPROVEMENTS:
- [p2p/conn] \#3111 make SecretConnection thread safe
- [privval] \#2923 retry RemoteSigner connections on error
- [rpc] \#3047 Include peer's remote IP in `/net_info`

### BUG FIXES:

- [types] \#2926 do not panic if retrieving the private validator's public key fails
- [rpc] \#3080 check if the variable "skipCount" is bigger than zero. If it is not, we set it to 0. If it is, we do not do anything.
- [crypto/multisig] \#3102 fix multisig keys address length
- [crypto/encoding] \#3101 Fix `PubKeyMultisigThreshold` unmarshalling into `crypto.PubKey` interface
183  CONTRIBUTING.md
@@ -1,25 +1,71 @@
# Contributing

Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards.
Thank you for your interest in contributing to Tendermint! Before
contributing, it may be helpful to understand the goal of the project. The goal
of Tendermint is to develop a BFT consensus engine robust enough to
support permissionless value-carrying networks. While all contributions are
welcome, contributors should bear this goal in mind in deciding if they should
target the main tendermint project or a potential fork. When targeting the
main Tendermint project, the following process leads to the best chance of
landing changes in master.

Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
All work on the code base should be motivated by a [Github
Issue](https://github.com/tendermint/tendermint/issues).
[Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22)
is a good place to start when looking for places to contribute. If you
would like to work on an issue which already exists, please indicate so
by leaving a comment.

Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file.
All new contributions should start with a [Github
Issue](https://github.com/tendermint/tendermint/issues/new/choose). The
issue helps capture the problem you're trying to solve and allows for
early feedback. Once the issue is created the process can proceed in different
directions depending on how well defined the problem and potential
solution are. If the change is simple and well understood, maintainers
will indicate their support with a heartfelt emoji.

If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.

When the problem is well understood but the solution leads to large
structural changes to the code base, these changes should be proposed in
the form of an [Architectural Decision Record
(ADR)](./docs/architecture/). The ADR will help build consensus on an
overall strategy to ensure the code base maintains coherence
in the larger context. If you are not comfortable with writing an ADR,
you can open a less-formal issue and the maintainers will help you
turn it into an ADR. ADR numbers can be registered [here](https://github.com/tendermint/tendermint/issues/2313).

When the problem as well as proposed solution are well understood,
changes should start with a [draft
pull request](https://github.blog/2019-02-14-introducing-draft-pull-requests/)
against master. The draft signals that work is underway. When the work
is ready for feedback, hitting "Ready for Review" will signal to the
maintainers to take a look.

![Contributing flow](./docs/imgs/contributing.png)

Each stage of the process is aimed at creating feedback cycles which align contributors and maintainers to make sure:
* Contributors don’t waste their time implementing/proposing features which won’t land in master.
* Maintainers have the necessary context in order to support and review contributions.

## Forking

Please note that Go requires code to live under absolute paths, which complicates forking.
While my fork lives at `https://github.com/ebuchman/tendermint`,
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
Instead, we use `git remote` to add the fork as a new remote for the original repo,
`$GOPATH/src/github.com/tendermint/tendermint `, and do all the work there.
`$GOPATH/src/github.com/tendermint/tendermint`, and do all the work there.

For instance, to create a fork and work on a branch of it, I would:

* Create the fork on github, using the fork button.
* Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
* `git remote rename origin upstream`
* `git remote add origin git@github.com:ebuchman/basecoin.git`
- Create the fork on github, using the fork button.
- Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
- `git remote rename origin upstream`
- `git remote add origin git@github.com:ebuchman/basecoin.git`

Now `origin` refers to my fork and `upstream` refers to the tendermint version.
So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there.
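Taken together, the fork setup is a handful of git commands. A sketch, with `ebuchman` standing in for your own handle and your tendermint fork (rather than basecoin) as the new `origin`:

```bash
# Work in the canonical path, never in a fork-specific one.
cd "$GOPATH/src/github.com/tendermint/tendermint"

# The original repo becomes `upstream`; your fork becomes `origin`.
git remote rename origin upstream
git remote add origin git@github.com:ebuchman/tendermint.git

# Push work to your fork, then open pull requests from there.
git push -u origin master
```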
@@ -27,14 +73,12 @@ Of course, replace `ebuchman` with your git handle.
To pull in updates from the upstream repo, run

* `git fetch upstream`
* `git rebase upstream/master` (or whatever branch you want)

Please don't make Pull Requests to `master`.
- `git fetch upstream`
- `git rebase upstream/master` (or whatever branch you want)

## Dependencies

We use [dep](https://github.com/golang/dep) to manage dependencies.
We use [go modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.

That said, the master branch of every Tendermint repository should just build
with `go get`, which means they should be kept up-to-date with their
@@ -42,18 +86,17 @@ dependencies so we can get away with telling people they can just `go get` our
software.

Since some dependencies are not under our control, a third party may break our
build, in which case we can fall back on `dep ensure` (or `make
get_vendor_deps`). Even for dependencies under our control, dep helps us to
build, in which case we can fall back on `go mod tidy`. Even for dependencies under our control, go helps us to
keep multiple repos in sync as they evolve. Anything with an executable, such
as apps, tools, and the core, should use dep.

Run `dep status` to get a list of vendor dependencies that may not be
Run `go list -u -m all` to get a list of dependencies that may not be
up-to-date.

When updating dependencies, please only update the particular dependencies you
need. Instead of running `dep ensure -update`, which will update anything,
need. Instead of running `go get -u=patch`, which will update anything,
specify exactly the dependency you want to update, eg.
`dep ensure -update github.com/tendermint/go-amino`.
`GO111MODULE=on go get -u github.com/tendermint/go-amino@master`.
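For example, checking for stale modules and then bumping a single one might look like the following sketch (go-amino is just the example dependency used above):

```bash
# List all modules along with any available updates.
GO111MODULE=on go list -u -m all

# Update only the dependency you care about, then tidy the module files.
GO111MODULE=on go get -u github.com/tendermint/go-amino@master
go mod tidy
```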
|
@@ -105,53 +148,66 @@ removed from the header in rpc responses as well.

## Branching Model and Release

All repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
This means that all pull-requests should be made against develop. Any merge to
master constitutes a tagged release.
The main development branch is master.

### Development Procedure:
- the latest state of development is on `develop`
- `develop` must never fail `make test`
- never --force onto `develop` (except when reverting a broken commit, which should seldom happen)
Every release is maintained in a release branch named `vX.Y.Z`.

Note all pull requests should be squash merged except for merging to a release branch (named `vX.Y`). This keeps the commit history clean and makes it
easy to reference the pull request where a change was introduced.

### Development Procedure

- the latest state of development is on `master`
- `master` must never fail `make test`
- never --force onto `master` (except when reverting a broken commit, which should seldom happen)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`)
- make changes and update the `CHANGELOG_PENDING.md` to record your change
- before submitting a pull request, run `git rebase` on top of the latest `develop`
- before submitting a pull request, run `git rebase` on top of the latest `master`

### Pull Merge Procedure:
- ensure pull branch is based on a recent develop
### Pull Merge Procedure

- ensure pull branch is based on a recent `master`
- run `make test` to ensure that all tests pass
- merge pull request
- squash merge pull request
- the `unstable` branch may be used to aggregate pull merges before fixing tests

### Release Procedure:
- start on `develop`
- run integration tests (see `test_integrations` in Makefile)
- prepare changelog:
  - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
  - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
    all issues
  - run `bash ./scripts/authors.sh` to get a list of authors since the latest
    release, and add the github aliases of external contributors to the top of
    the changelog. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
- reset the `CHANGELOG_PENDING.md`
- bump versions
- push to release/vX.X.X to run the extended integration tests on the CI
- merge to master
- merge master back to develop
### Release Procedure

### Hotfix Procedure:
- start on `master`
- checkout a new branch named hotfix-vX.X.X
- make the required changes
  - these changes should be small and an absolute necessity
  - add a note to CHANGELOG.md
- bump versions
- push to hotfix-vX.X.X to run the extended integration tests on the CI
- merge hotfix-vX.X.X to master
- merge hotfix-vX.X.X to develop
- delete the hotfix-vX.X.X branch
#### Major Release

1. start on `master`
2. run integration tests (see `test_integrations` in Makefile)
3. prepare release in a pull request against `master` (to be squash merged):
   - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all issues
   - run `bash ./scripts/authors.sh` to get a list of authors since the latest
     release, and add the github aliases of external contributors to the top of
     the changelog. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
   - reset the `CHANGELOG_PENDING.md`
   - bump versions
4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`)
5. merge back to master (don't squash merge!); the whole flow is sketched below
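Steps 3-5 consolidated into a sketch (the `v0.33` branch name is a placeholder; the changelog edits themselves are manual):

```bash
# 3. prepare the release PR against master (squash merged):
python ./scripts/linkify_changelog.py CHANGELOG.md   # add links for all issues
bash ./scripts/authors.sh                            # authors since the latest release
# ...copy CHANGELOG_PENDING.md into CHANGELOG.md, reset it, and bump versions...

# 4. push the prepared release details to the vX.X branch to trigger vX.X.0:
git push origin master:v0.33

# 5. merge back to master (a real merge, not a squash):
git checkout master
git merge --no-ff v0.33
```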
#### Minor Release

If there were no breaking changes and you need to create a release nonetheless,
the procedure is almost exactly like with a new release above.

The only difference is that in the end you create a pull request against the existing `X.X` branch.
The branch name should match the release number you want to create.
Merging this PR will trigger the next release.
For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag,
the patch version will be incremented and the created release will be v0.34.1.

#### Backport Release

1. start from the existing release branch you want to backport changes to (e.g. v0.30),
   then branch to a release/vX.X.X branch locally (e.g. release/v0.30.7)
2. cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
3. steps 2 and 3 from [Major Release](#major-release)
4. push changes to release/vX.X.X branch
5. open a PR against the existing vX.X branch (a consolidated sketch follows)
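The backport flow as a sketch, using the v0.30 example above (the commit SHA is a placeholder):

```bash
git checkout v0.30                          # existing release branch to backport onto
git checkout -b release/v0.30.7             # local release branch
git cherry-pick <sha-of-squash-merged-pr>   # repeat for each change to backport
# ...prepare release details (steps 2 and 3 of Major Release)...
git push origin release/v0.30.7             # then open a PR against the v0.30 branch
```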
## Testing

@@ -161,3 +217,16 @@ If they have `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.

### RPC Testing

If you contribute to the RPC endpoints it's important to document your changes in the [Swagger file](./rpc/swagger/swagger.yaml).
To test your changes you should install `nodejs` and run:

```bash
npm i -g dredd
make build-linux build-contract-tests-hooks
make contract-tests
```

This command will spin up a network and check every endpoint against what has been documented.
@@ -1,5 +1,5 @@
FROM alpine:3.7
MAINTAINER Greg Szabo <greg@tendermint.com>
FROM alpine:3.9
LABEL maintainer="hello@tendermint.com"

# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
# (unless you change `genesis_file` in config.toml). You can put your config.toml and
@@ -36,4 +36,3 @@ STOPSIGNAL SIGTERM

ARG BINARY=tendermint
COPY $BINARY /usr/bin/tendermint
@@ -1,23 +0,0 @@
FROM golang:latest

RUN mkdir -p /go/src/github.com/tendermint/abci
WORKDIR /go/src/github.com/tendermint/abci

COPY Makefile /go/src/github.com/tendermint/abci/

# see make protoc for details on ldconfig
RUN make get_protoc && ldconfig

# killall is used in tests
RUN apt-get update && apt-get install -y \
    psmisc \
    && rm -rf /var/lib/apt/lists/*

COPY Gopkg.toml /go/src/github.com/tendermint/abci/
COPY Gopkg.lock /go/src/github.com/tendermint/abci/
RUN make get_tools

# see https://github.com/golang/dep/issues/1312
RUN dep ensure -vendor-only

COPY . /go/src/github.com/tendermint/abci
DOCKER/Dockerfile.build_c-amazonlinux (new file, 28 lines)
@@ -0,0 +1,28 @@
FROM amazonlinux:2

RUN yum -y update && \
	yum -y install wget

RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
	rpm -ivh epel-release-latest-7.noarch.rpm

RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which

ENV GOVERSION=1.12.9

RUN cd /tmp && \
	wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
	tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
	mkdir -p /go/src && \
	mkdir -p /go/bin

ENV PATH=$PATH:/usr/local/go/bin:/go/bin
ENV GOBIN=/go/bin
ENV GOPATH=/go/src

RUN mkdir -p /tendermint
WORKDIR /tendermint

CMD ["/usr/bin/make", "build_c"]

@@ -1,35 +0,0 @@
FROM alpine:3.7

ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT

RUN addgroup tmuser && \
	adduser -S -G tmuser tmuser

RUN mkdir -p $DATA_ROOT && \
	chown -R tmuser:tmuser $DATA_ROOT

RUN apk add --no-cache bash curl jq

ENV GOPATH /go
ENV PATH "$PATH:/go/bin"
RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
	apk add --no-cache go build-base git && \
	cd /go/src/github.com/tendermint/tendermint && \
	git clone https://github.com/tendermint/tendermint . && \
	git checkout develop && \
	make get_tools && \
	make get_vendor_deps && \
	make install && \
	cd - && \
	rm -rf /go/src/github.com/tendermint/tendermint && \
	apk del go build-base git

VOLUME $DATA_ROOT

EXPOSE 26656
EXPOSE 26657

ENTRYPOINT ["tendermint"]

CMD ["node", "--moniker=`hostname`", "--proxy_app=kvstore"]
@@ -1,5 +1,4 @@
FROM golang:1.10.1

FROM golang:latest

# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
@@ -15,4 +14,3 @@ VOLUME /go

EXPOSE 26656
EXPOSE 26657

@@ -4,13 +4,10 @@ build:
push:
	@sh -c "'$(CURDIR)/push.sh'"

build_develop:
	docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop .

build_testing:
	docker build --tag tendermint/testing -f ./Dockerfile.testing .

push_develop:
	docker push "tendermint/tendermint:develop"
build_amazonlinux_buildimage:
	docker build -t "tendermint/tendermint:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux .

.PHONY: build build_develop push push_develop
.PHONY: build push build_testing build_amazonlinux_buildimage

@@ -2,38 +2,27 @@

## Supported tags and respective `Dockerfile` links

- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile)
- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile)
- `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile)
- `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile)
- `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile)
- `0.11.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile)
- `0.10.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/e5342f4054ab784b2cd6150e14f01053d7c8deb2/DOCKER/Dockerfile)
- `0.9.1`, `0.9` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop)
DockerHub tags for official releases are [here](https://hub.docker.com/r/tendermint/tendermint/tags/). The "latest" tag will always point to the highest version number.

The `develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.
Official releases can be found [here](https://github.com/tendermint/tendermint/releases).

The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).

Respective versioned files can be found at https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile (replace the Xs with the version number).

## Quick reference

* **Where to get help:**
  https://cosmos.network/community

* **Where to file issues:**
  https://github.com/tendermint/tendermint/issues

* **Supported Docker versions:**
  [the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis)
- **Where to get help:** https://tendermint.com/
- **Where to file issues:** https://github.com/tendermint/tendermint/issues
- **Supported Docker versions:** [the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis)

## Tendermint

Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.

For more background, see the [introduction](https://tendermint.readthedocs.io/en/master/introduction.html).
For more background, see [the docs](https://docs.tendermint.com/master/introduction/#quick-start).

To get started developing applications, see the [application developers guide](https://tendermint.readthedocs.io/en/master/getting-started.html).
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/master/introduction/quick-start.html).

## How to use this image

@@ -48,7 +37,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app

## Local cluster

To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:

```
make build-linux
@@ -60,7 +49,7 @@ Note that this will build and use a different image than the ones provided here.

## License

- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).

## Contributing

Gopkg.lock (generated, 554 lines)
@@ -1,554 +0,0 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  branch = "master"
  digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
  name = "github.com/beorn7/perks"
  packages = ["quantile"]
  pruneopts = "UT"
  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"

[[projects]]
  branch = "master"
  digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7"
  name = "github.com/btcsuite/btcd"
  packages = ["btcec"]
  pruneopts = "UT"
  revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0"

[[projects]]
  digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"
  name = "github.com/btcsuite/btcutil"
  packages = [
    "base58",
    "bech32",
  ]
  pruneopts = "UT"
  revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

[[projects]]
  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  pruneopts = "UT"
  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
  version = "v1.1.1"

[[projects]]
  digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
  name = "github.com/fortytw2/leaktest"
  packages = ["."]
  pruneopts = "UT"
  revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e"
  version = "v1.2.0"

[[projects]]
  digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
  name = "github.com/fsnotify/fsnotify"
  packages = ["."]
  pruneopts = "UT"
  revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
  version = "v1.4.7"

[[projects]]
  digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11"
  name = "github.com/go-kit/kit"
  packages = [
    "log",
    "log/level",
    "log/term",
    "metrics",
    "metrics/discard",
    "metrics/internal/lv",
    "metrics/prometheus",
  ]
  pruneopts = "UT"
  revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
  version = "v0.6.0"

[[projects]]
  digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659"
  name = "github.com/go-logfmt/logfmt"
  packages = ["."]
  pruneopts = "UT"
  revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
  version = "v0.3.0"

[[projects]]
  digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d"
  name = "github.com/go-stack/stack"
  packages = ["."]
  pruneopts = "UT"
  revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
  version = "v1.8.0"

[[projects]]
  digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e"
  name = "github.com/gogo/protobuf"
  packages = [
    "gogoproto",
    "jsonpb",
    "proto",
    "protoc-gen-gogo/descriptor",
    "sortkeys",
    "types",
  ]
  pruneopts = "UT"
  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
  version = "v1.1.1"

[[projects]]
  digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
  name = "github.com/golang/protobuf"
  packages = [
    "proto",
    "ptypes",
    "ptypes/any",
    "ptypes/duration",
    "ptypes/timestamp",
  ]
  pruneopts = "UT"
  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
  version = "v1.1.0"

[[projects]]
  branch = "master"
  digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009"
  name = "github.com/golang/snappy"
  packages = ["."]
  pruneopts = "UT"
  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"

[[projects]]
  digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e"
  name = "github.com/gorilla/websocket"
  packages = ["."]
  pruneopts = "UT"
  revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
  version = "v1.2.0"

[[projects]]
  digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8"
  name = "github.com/hashicorp/hcl"
  packages = [
    ".",
    "hcl/ast",
    "hcl/parser",
    "hcl/scanner",
    "hcl/strconv",
    "hcl/token",
    "json/parser",
    "json/scanner",
    "json/token",
  ]
  pruneopts = "UT"
  revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
  version = "v1.0.0"

[[projects]]
  digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
  name = "github.com/inconshreveable/mousetrap"
  packages = ["."]
  pruneopts = "UT"
  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
  version = "v1.0"

[[projects]]
  digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214"
  name = "github.com/jmhodges/levigo"
  packages = ["."]
  pruneopts = "UT"
  revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"

[[projects]]
  branch = "master"
  digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72"
  name = "github.com/kr/logfmt"
  packages = ["."]
  pruneopts = "UT"
  revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"

[[projects]]
  digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
  name = "github.com/magiconair/properties"
  packages = ["."]
  pruneopts = "UT"
  revision = "c2353362d570a7bfa228149c62842019201cfb71"
  version = "v1.8.0"

[[projects]]
  digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
  name = "github.com/matttproud/golang_protobuf_extensions"
  packages = ["pbutil"]
  pruneopts = "UT"
  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
  version = "v1.0.1"

[[projects]]
  digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
  name = "github.com/mitchellh/mapstructure"
  packages = ["."]
  pruneopts = "UT"
  revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
  version = "v1.1.2"

[[projects]]
  digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
  name = "github.com/pelletier/go-toml"
  packages = ["."]
  pruneopts = "UT"
  revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
  version = "v1.2.0"

[[projects]]
  digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
  name = "github.com/pkg/errors"
  packages = ["."]
  pruneopts = "UT"
  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
  version = "v0.8.0"

[[projects]]
  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
  name = "github.com/pmezard/go-difflib"
  packages = ["difflib"]
  pruneopts = "UT"
  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
  version = "v1.0.0"

[[projects]]
  digest = "1:26663fafdea73a38075b07e8e9d82fc0056379d2be8bb4e13899e8fda7c7dd23"
  name = "github.com/prometheus/client_golang"
  packages = [
    "prometheus",
    "prometheus/internal",
    "prometheus/promhttp",
  ]
  pruneopts = "UT"
  revision = "abad2d1bd44235a26707c172eab6bca5bf2dbad3"
  version = "v0.9.1"

[[projects]]
  branch = "master"
  digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
  name = "github.com/prometheus/client_model"
  packages = ["go"]
  pruneopts = "UT"
  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"

[[projects]]
  branch = "master"
  digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4"
  name = "github.com/prometheus/common"
  packages = [
    "expfmt",
    "internal/bitbucket.org/ww/goautoneg",
    "model",
  ]
  pruneopts = "UT"
  revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"

[[projects]]
  branch = "master"
  digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc"
  name = "github.com/prometheus/procfs"
  packages = [
    ".",
    "internal/util",
    "nfs",
    "xfs",
  ]
  pruneopts = "UT"
  revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"

[[projects]]
  digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c"
  name = "github.com/rcrowley/go-metrics"
  packages = ["."]
  pruneopts = "UT"
  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"

[[projects]]
  digest = "1:b0c25f00bad20d783d259af2af8666969e2fc343fa0dc9efe52936bbd67fb758"
  name = "github.com/rs/cors"
  packages = ["."]
  pruneopts = "UT"
  revision = "9a47f48565a795472d43519dd49aac781f3034fb"
  version = "v1.6.0"

[[projects]]
  digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd"
  name = "github.com/spf13/afero"
  packages = [
    ".",
    "mem",
  ]
  pruneopts = "UT"
  revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
  version = "v1.1.2"

[[projects]]
  digest = "1:08d65904057412fc0270fc4812a1c90c594186819243160dc779a402d4b6d0bc"
  name = "github.com/spf13/cast"
  packages = ["."]
  pruneopts = "UT"
  revision = "8c9545af88b134710ab1cd196795e7f2388358d7"
  version = "v1.3.0"

[[projects]]
  digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e"
  name = "github.com/spf13/cobra"
  packages = ["."]
  pruneopts = "UT"
  revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
  version = "v0.0.1"

[[projects]]
  digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb"
  name = "github.com/spf13/jwalterweatherman"
  packages = ["."]
  pruneopts = "UT"
  revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
  version = "v1.0.0"

[[projects]]
  digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
  name = "github.com/spf13/pflag"
  packages = ["."]
  pruneopts = "UT"
  revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
  version = "v1.0.3"

[[projects]]
  digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96"
  name = "github.com/spf13/viper"
  packages = ["."]
  pruneopts = "UT"
  revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
  version = "v1.0.0"

[[projects]]
  digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6"
  name = "github.com/stretchr/testify"
  packages = [
    "assert",
    "require",
  ]
  pruneopts = "UT"
  revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
  version = "v1.2.1"

[[projects]]
  branch = "master"
  digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c"
  name = "github.com/syndtr/goleveldb"
  packages = [
    "leveldb",
    "leveldb/cache",
    "leveldb/comparer",
    "leveldb/errors",
    "leveldb/filter",
    "leveldb/iterator",
    "leveldb/journal",
    "leveldb/memdb",
    "leveldb/opt",
    "leveldb/storage",
    "leveldb/table",
    "leveldb/util",
  ]
  pruneopts = "UT"
  revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"

[[projects]]
  digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
  name = "github.com/tendermint/btcd"
  packages = ["btcec"]
  pruneopts = "UT"
  revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"

[[projects]]
  digest = "1:ad9c4c1a4e7875330b1f62906f2830f043a23edb5db997e3a5ac5d3e6eadf80a"
  name = "github.com/tendermint/go-amino"
  packages = ["."]
  pruneopts = "UT"
  revision = "dc14acf9ef15f85828bfbc561ed9dd9d2a284885"
  version = "v0.14.1"

[[projects]]
  digest = "1:00d2b3e64cdc3fa69aa250dfbe4cc38c4837d4f37e62279be2ae52107ffbbb44"
  name = "golang.org/x/crypto"
  packages = [
    "bcrypt",
    "blowfish",
    "chacha20poly1305",
    "curve25519",
    "ed25519",
    "ed25519/internal/edwards25519",
    "hkdf",
    "internal/chacha20",
    "internal/subtle",
    "nacl/box",
    "nacl/secretbox",
    "openpgp/armor",
    "openpgp/errors",
    "poly1305",
    "ripemd160",
    "salsa20/salsa",
  ]
  pruneopts = "UT"
  revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"

[[projects]]
  digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"
  name = "golang.org/x/net"
  packages = [
    "context",
    "http/httpguts",
    "http2",
    "http2/hpack",
    "idna",
    "internal/timeseries",
    "netutil",
    "trace",
  ]
  pruneopts = "UT"
  revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"

[[projects]]
  branch = "master"
  digest = "1:6f86e2f2e2217cd4d74dec6786163cf80e4d2b99adb341ecc60a45113b844dca"
  name = "golang.org/x/sys"
  packages = [
    "cpu",
    "unix",
  ]
  pruneopts = "UT"
  revision = "7e31e0c00fa05cb5fbf4347b585621d6709e19a4"

[[projects]]
  digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
  name = "golang.org/x/text"
  packages = [
    "collate",
    "collate/build",
    "internal/colltab",
    "internal/gen",
    "internal/tag",
    "internal/triegen",
    "internal/ucd",
    "language",
    "secure/bidirule",
    "transform",
    "unicode/bidi",
    "unicode/cldr",
    "unicode/norm",
    "unicode/rangetable",
  ]
  pruneopts = "UT"
  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
  version = "v0.3.0"

[[projects]]
  branch = "master"
  digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a"
  name = "google.golang.org/genproto"
  packages = ["googleapis/rpc/status"]
  pruneopts = "UT"
  revision = "b69ba1387ce2108ac9bc8e8e5e5a46e7d5c72313"

[[projects]]
  digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
  name = "google.golang.org/grpc"
  packages = [
    ".",
    "balancer",
    "balancer/base",
    "balancer/roundrobin",
    "codes",
    "connectivity",
    "credentials",
    "encoding",
    "encoding/proto",
    "grpclog",
    "internal",
    "internal/backoff",
    "internal/channelz",
    "internal/grpcrand",
    "keepalive",
    "metadata",
    "naming",
    "peer",
    "resolver",
    "resolver/dns",
    "resolver/passthrough",
    "stats",
    "status",
    "tap",
    "transport",
  ]
  pruneopts = "UT"
  revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
  version = "v1.13.0"

[[projects]]
  digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  pruneopts = "UT"
  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
  version = "v2.2.1"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  input-imports = [
    "github.com/btcsuite/btcutil/base58",
    "github.com/btcsuite/btcutil/bech32",
    "github.com/fortytw2/leaktest",
    "github.com/go-kit/kit/log",
    "github.com/go-kit/kit/log/level",
    "github.com/go-kit/kit/log/term",
    "github.com/go-kit/kit/metrics",
    "github.com/go-kit/kit/metrics/discard",
    "github.com/go-kit/kit/metrics/prometheus",
    "github.com/go-logfmt/logfmt",
    "github.com/gogo/protobuf/gogoproto",
    "github.com/gogo/protobuf/jsonpb",
    "github.com/gogo/protobuf/proto",
    "github.com/gogo/protobuf/types",
    "github.com/golang/protobuf/proto",
    "github.com/golang/protobuf/ptypes/timestamp",
    "github.com/gorilla/websocket",
    "github.com/jmhodges/levigo",
    "github.com/pkg/errors",
    "github.com/prometheus/client_golang/prometheus",
    "github.com/prometheus/client_golang/prometheus/promhttp",
    "github.com/rcrowley/go-metrics",
    "github.com/rs/cors",
    "github.com/spf13/cobra",
    "github.com/spf13/viper",
    "github.com/stretchr/testify/assert",
    "github.com/stretchr/testify/require",
    "github.com/syndtr/goleveldb/leveldb",
    "github.com/syndtr/goleveldb/leveldb/errors",
    "github.com/syndtr/goleveldb/leveldb/iterator",
    "github.com/syndtr/goleveldb/leveldb/opt",
    "github.com/tendermint/btcd/btcec",
    "github.com/tendermint/go-amino",
    "golang.org/x/crypto/bcrypt",
    "golang.org/x/crypto/chacha20poly1305",
    "golang.org/x/crypto/curve25519",
    "golang.org/x/crypto/ed25519",
    "golang.org/x/crypto/hkdf",
    "golang.org/x/crypto/nacl/box",
    "golang.org/x/crypto/nacl/secretbox",
    "golang.org/x/crypto/openpgp/armor",
    "golang.org/x/crypto/ripemd160",
    "golang.org/x/net/context",
    "golang.org/x/net/netutil",
    "google.golang.org/grpc",
    "google.golang.org/grpc/credentials",
  ]
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml (109 lines)
@@ -1,109 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"
#
# [prune]
#   non-go = false
#   go-tests = true
#   unused-packages = true
#
###########################################################

# Allow only patch releases for serialization libraries
[[constraint]]
  name = "github.com/tendermint/go-amino"
  version = "~0.14.1"

[[constraint]]
  name = "github.com/gogo/protobuf"
  version = "~1.1.1"

[[constraint]]
  name = "github.com/golang/protobuf"
  version = "~1.1.0"

# Allow only minor releases for other libraries
[[constraint]]
  name = "github.com/go-kit/kit"
  version = "^0.6.0"

[[constraint]]
  name = "github.com/gorilla/websocket"
  version = "^1.2.0"

[[constraint]]
  name = "github.com/rs/cors"
  version = "^1.6.0"

[[constraint]]
  name = "github.com/pkg/errors"
  version = "^0.8.0"

[[constraint]]
  name = "github.com/spf13/cobra"
  version = "^0.0.1"

[[constraint]]
  name = "github.com/spf13/viper"
  version = "^1.0.0"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "^1.2.1"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "^1.13.0"

[[constraint]]
  name = "github.com/fortytw2/leaktest"
  version = "^1.2.0"

[[constraint]]
  name = "github.com/prometheus/client_golang"
  version = "^0.9.1"

###################################
## Some repos don't have releases.
## Pin to revision

[[constraint]]
  name = "golang.org/x/crypto"
  revision = "505ab145d0a99da450461ae2c1a9f6cd10d1f447"

[[override]]
  name = "github.com/jmhodges/levigo"
  revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"

# last revision used by go-crypto
[[constraint]]
  name = "github.com/btcsuite/btcutil"
  revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

[[constraint]]
  name = "github.com/tendermint/btcd"
  revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"

[[constraint]]
  name = "github.com/rcrowley/go-metrics"
  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"

[[constraint]]
  name = "golang.org/x/net"
  revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"

[prune]
  go-tests = true
  unused-packages = true
Makefile (293 lines)
@@ -1,39 +1,34 @@
GOTOOLS = \
	github.com/mitchellh/gox \
	github.com/golang/dep/cmd/dep \
	github.com/alecthomas/gometalinter \
	github.com/gogo/protobuf/protoc-gen-gogo \
	github.com/square/certstrap
GOBIN?=${GOPATH}/bin
PACKAGES=$(shell go list ./...)
OUTPUT?=build/tendermint

INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
INCLUDE = -I=${GOPATH}/src/github.com/tendermint/tendermint -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
BUILD_TAGS?='tendermint'
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"

LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s
LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD` -s -w
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"

all: check build test install

check: check_tools get_vendor_deps
# The below include contains the tools.
include tools.mk
include tests.mk

########################################
### Build Tendermint

build:
	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/
	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/

build_c:
	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/
	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/

build_race:
	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint
	CGO_ENABLED=1 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint

install:
	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint

install_c:
	CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" ./cmd/tendermint
	CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" ./cmd/tendermint

########################################
### Protobuf
@@ -46,7 +41,7 @@ protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc protoc_proto3types
## See https://stackoverflow.com/a/25518702
## Note the $< here is substituted for the %.proto
## Note the $@ here is substituted for the %.pb.go
	protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,plugins=grpc:.
	protoc $(INCLUDE) $< --gogo_out=Mgoogle/protobuf/timestamp.proto=github.com/golang/protobuf/ptypes/timestamp,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc:../../..

########################################
### Build ABCI
@@ -57,10 +52,10 @@ protoc_abci: abci/types/types.pb.go
protoc_proto3types: types/proto3/block.pb.go

build_abci:
	@go build -i ./abci/cmd/...
	@go build -mod=readonly -i ./abci/cmd/...

install_abci:
	@go install ./abci/cmd/...
	@go install -mod=readonly ./abci/cmd/...

########################################
### Distribution
@@ -70,43 +65,15 @@ install_abci:
dist:
	@BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'"

########################################
### Tools & dependencies
go-mod-cache: go.sum
	@echo "--> Download go modules to local cache"
	@go mod download
.PHONY: go-mod-cache

check_tools:
	@# https://stackoverflow.com/a/25668869
	@echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\
	$(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))"

get_tools:
	@echo "--> Installing tools"
	./scripts/get_tools.sh

get_dev_tools:
	@echo "--> Downloading linters (this may take a while)"
	$(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN)

update_tools:
	@echo "--> Updating tools"
	./scripts/get_tools.sh

# Update dependencies
get_vendor_deps:
	@echo "--> Running dep"
	@dep ensure

# For ABCI and libs
get_protoc:
	@# https://github.com/google/protobuf/releases
	curl -L https://github.com/google/protobuf/releases/download/v3.6.1/protobuf-cpp-3.6.1.tar.gz | tar xvz && \
		cd protobuf-3.6.1 && \
		DIST_LANG=cpp ./configure && \
		make && \
		make check && \
		sudo make install && \
		sudo ldconfig && \
		cd .. && \
		rm -rf protobuf-3.6.1
go.sum: go.mod
	@echo "--> Ensure dependencies have not been modified"
	@go mod verify
	@go mod tidy

draw_deps:
	@# requires brew install graphviz or apt-get install graphviz
@@ -115,117 +82,45 @@ draw_deps:

get_deps_bin_size:
	@# Copy of build recipe with additional flags to perform binary size analysis
	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
	@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
	@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"

########################################
### Libs

protoc_libs: libs/common/types.pb.go
protoc_libs: libs/kv/types.pb.go

# generates certificates for TLS testing in remotedb and RPC server
gen_certs: clean_certs
	## Generating certificates for TLS testing...
	certstrap init --common-name "tendermint.com" --passphrase ""
	certstrap request-cert -ip "::" --passphrase ""
	certstrap sign "::" --CA "tendermint.com" --passphrase ""
	mv out/::.crt out/::.key db/remotedb

clean_certs:
	## Cleaning TLS testing certificates...
	certstrap request-cert --common-name "remotedb" -ip "127.0.0.1" --passphrase ""
	certstrap sign "remotedb" --CA "tendermint.com" --passphrase ""
	mv out/remotedb.crt libs/db/remotedb/test.crt
	mv out/remotedb.key libs/db/remotedb/test.key
	certstrap request-cert --common-name "server" -ip "127.0.0.1" --passphrase ""
	certstrap sign "server" --CA "tendermint.com" --passphrase ""
	mv out/server.crt rpc/lib/server/test.crt
	mv out/server.key rpc/lib/server/test.key
	rm -rf out
	rm -f db/remotedb/::.crt db/remotedb/::.key

test_libs: gen_certs
	GOCACHE=off go test -tags gcc $(PACKAGES)
	make clean_certs
# deletes generated certificates
clean_certs:
	rm -f libs/db/remotedb/test.crt
	rm -f libs/db/remotedb/test.key
	rm -f rpc/lib/server/test.crt
	rm -f rpc/lib/server/test.key

test_libs:
	go test -tags cleveldb boltdb $(PACKAGES)

grpc_dbserver:
	protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto
	protoc -I libs/db/remotedb/proto/ libs/db/remotedb/proto/defs.proto --go_out=plugins=grpc:libs/db/remotedb/proto

protoc_grpc: rpc/grpc/types.pb.go

protoc_merkle: crypto/merkle/merkle.pb.go

########################################
### Testing

## required to be run first by most tests
build_docker_test_image:
	docker build -t tester -f ./test/docker/Dockerfile .

### coverage, app, persistence, and libs tests
test_cover:
	# run the go unit tests with coverage
	bash test/test_cover.sh

test_apps:
	# run the app tests using bash
	# requires `abci-cli` and `tendermint` binaries installed
	bash test/app/test.sh

test_abci_apps:
	bash abci/tests/test_app/test.sh

test_abci_cli:
	# test the cli against the examples in the tutorial at:
	# ./docs/abci-cli.md
	# if test fails, update the docs ^
	@ bash abci/tests/test_cli/test.sh

test_persistence:
	# run the persistence tests using bash
	# requires `abci-cli` installed
	docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh

	# TODO undockerize
	# bash test/persist/test_failure_indices.sh

test_p2p:
	docker rm -f rsyslog || true
	rm -rf test/logs || true
	mkdir test/logs
	cd test/
	docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog
	cd ..
	# requires 'tester' the image from above
	bash test/p2p/test.sh tester
	# the `docker cp` takes a really long time; uncomment for debugging
	#
	# mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs

test_integrations:
	make build_docker_test_image
	make get_tools
	make get_vendor_deps
	make install
	make test_cover
	make test_apps
	make test_abci_apps
	make test_abci_cli
	make test_libs
	make test_persistence
	make test_p2p

test_release:
	@go test -tags release $(PACKAGES)

test100:
	@for i in {1..100}; do make test; done

vagrant_test:
	vagrant up
	vagrant ssh -c 'make test_integrations'

### go tests
test:
	@echo "--> Running go test"
	@GOCACHE=off go test -p 1 $(PACKAGES)

test_race:
	@echo "--> Running go test --race"
	@GOCACHE=off go test -p 1 -v -race $(PACKAGES)


########################################
### Formatting, linting, and vetting
@@ -233,54 +128,37 @@ test_race:
fmt:
	@go fmt ./...

metalinter:
lint:
	@echo "--> Running linter"
	@gometalinter $(LINT_FLAGS) --disable-all \
		--enable=deadcode \
		--enable=gosimple \
		--enable=misspell \
		--enable=safesql \
		./...
		#--enable=gas \
		#--enable=maligned \
		#--enable=dupl \
		#--enable=errcheck \
		#--enable=goconst \
		#--enable=gocyclo \
		#--enable=goimports \
		#--enable=golint \ <== comments on anything exported
		#--enable=gotype \
		#--enable=ineffassign \
		#--enable=interfacer \
		#--enable=megacheck \
		#--enable=staticcheck \
		#--enable=structcheck \
		#--enable=unconvert \
		#--enable=unparam \
		#--enable=unused \
		#--enable=varcheck \
		#--enable=vet \
		#--enable=vetshadow \

metalinter_all:
	@echo "--> Running linter (all)"
	gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./...
	@golangci-lint run

DESTINATION = ./index.html.md

rpc-docs:
	cat rpc/core/slate_header.txt > $(DESTINATION)
	godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION)
###########################################################
### Documentation

check_dep:
	dep status >> /dev/null
	!(grep -n branch Gopkg.toml)
build-docs:
	cd docs && \
	while read p; do \
		(git checkout $${p} && npm install && VUEPRESS_BASE="/$${p}/" npm run build) ; \
		mkdir -p ~/output/$${p} ; \
		cp -r .vuepress/dist/* ~/output/$${p}/ ; \
		cp ~/output/$${p}/index.html ~/output ; \
	done < versions ;

sync-docs:
	cd ~/output && \
	echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \
	echo "CI job = ${CIRCLE_BUILD_URL}" >> version.html ; \
	aws s3 sync . s3://${WEBSITE_BUCKET} --profile terraform --delete ; \
	aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ;
.PHONY: sync-docs

###########################################################
### Docker image

build-docker:
	cp build/tendermint DOCKER/tendermint
	cp $(OUTPUT) DOCKER/tendermint
	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
	rm -rf DOCKER/tendermint

@@ -288,17 +166,22 @@ build-docker:
### Local testnet using docker

# Build linux binary on other platforms
build-linux:
build-linux: tools
	GOOS=linux GOARCH=amd64 $(MAKE) build

build-docker-localnode:
	cd networks/local
	make
	cd -
	@cd networks/local && make

# Runs `make build_c` from within an Amazon Linux (v2)-based Docker build
# container in order to build an Amazon Linux-compatible binary. Produces a
# compatible binary at ./build/tendermint
build_c-amazonlinux:
	$(MAKE) -C ./DOCKER build_amazonlinux_buildimage
	docker run --rm -it -v `pwd`:/tendermint tendermint/tendermint:build_c-amazonlinux

# Run a 4-node testnet locally
localnet-start: localnet-stop
	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
localnet-start: localnet-stop build-docker-localnode
	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --config /etc/tendermint/config-template.toml --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2; fi
	docker-compose up

# Stop testnet
@@ -325,11 +208,27 @@ sentry-stop:
	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
	cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"

# meant for the CI, inspect script & adapt accordingly
build-slate:
	bash scripts/slate.sh
# Build hooks for dredd, to skip or add information on some steps
build-contract-tests-hooks:
ifeq ($(OS),Windows_NT)
	go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests.exe ./cmd/contract_tests
else
	go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests ./cmd/contract_tests
endif

# Run a nodejs tool to test endpoints against a localnet.
# The command takes care of starting and stopping the network.
# Prerequisites: build-contract-tests-hooks build-linux.
# The two build commands are not invoked here so that this target can run
# from generic containers or machines; the binaries should be built beforehand.
contract-tests:
	dredd

# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools get_dev_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all build_c install_c
.PHONY: check build build_race build_abci dist install install_abci check_tools tools update_tools draw_deps \
	protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver fmt build-linux localnet-start \
	localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop protoc_grpc protoc_all \
	build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint build-contract-tests-hooks contract-tests \
	build_c-amazonlinux

PHILOSOPHY.md (new file, 158 lines)
@@ -0,0 +1,158 @@
## Design goals

The design goals for Tendermint (and the SDK and related libraries) are:

* Simplicity and legibility
* Parallel performance, namely the ability to utilize multicore architectures
* Ability to evolve the codebase bug-free
* Debuggability
* Complete correctness that considers all edge cases, especially in concurrency
* Future-proof modular architecture, message protocol, APIs, and encapsulation

### Justification

Legibility is key to maintaining bug-free software as it evolves toward more
optimizations, more ease of debugging, and additional features.

It is too easy to introduce bugs over time by replacing lines of code with
those that may panic, which means ideally locks are unlocked by defer
statements.

For example,

```go
func (obj *MyObj) something() {
	mtx.Lock()
	obj.something = other
	mtx.Unlock()
}
```

It is too easy to refactor the codebase in the future to replace `other` with
`other.String()` for example, and this may introduce a bug that causes a
deadlock. So as much as reasonably possible, we need to be using defer
statements, even though it introduces additional overhead.

If it is necessary to optimize the unlocking of mutex locks, the solution is
more modularity via smaller functions, so that defer'd unlocks are scoped
within a smaller function.

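A minimal sketch of the defer-based version of the example above (same hypothetical names; not from the actual codebase):

```go
// The same method with the unlock moved into a defer statement. Even if a
// future edit makes the body panic (say, other.String() on a nil value),
// the mutex is still released, so no deadlock is introduced.
func (obj *MyObj) something() {
	mtx.Lock()
	defer mtx.Unlock()
	obj.something = other
}
```
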
Similarly, idiomatic for-loops should always be preferred over those that use
custom counters, because it is too easy for the body of a for-loop to
become more complicated over time, and it becomes more and more difficult to
assess the correctness of such a for-loop by visual inspection, as the sketch
below illustrates.

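A small illustrative contrast (hypothetical code, not from the codebase):

```go
package main

import "fmt"

func main() {
	items := []string{"a", "b", "c"}

	// Preferred: the idiomatic range loop; the iteration state cannot
	// drift as the body evolves.
	for i, item := range items {
		fmt.Println(i, item)
	}

	// Discouraged: a custom counter mutated inside the body; as the body
	// grows, correctness gets harder to assess by visual inspection
	// (a later edit to the increment would silently skip elements).
	i := 0
	for i < len(items) {
		fmt.Println(i, items[i])
		i++
	}
}
```
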
### On performance

It doesn't matter whether there are alternative implementations that are 2x or
3x more performant when the software doesn't work, deadlocks, or its bugs
cannot be debugged. By taking advantage of multicore concurrency, the
Tendermint implementation will at least be within an order of magnitude of
what is theoretically possible. The design philosophy of Tendermint,
and the choice of Go as the implementation language, are intended to make the
Tendermint implementation the standard specification for concurrent BFT software.

By focusing on the message protocols (e.g. ABCI, p2p messages) and on
encapsulation (e.g. the IAVL module and (relatively) independent reactors), we are both
implementing a standard implementation to be used as the specification for
future implementations in more optimizable languages like Rust, Java, and C++,
and creating sufficiently performant software. Tendermint Core will
never be as fast as future implementations of the Tendermint Spec, because Go
isn't designed to be as fast as possible. The advantage of using Go is that we
can develop the whole stack of modular components **faster** than in other
languages.

Furthermore, the real bottleneck is in the application layer, and it isn't
necessary to support more than a sufficiently decentralized set of validators
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).

Instead of optimizing Tendermint performance down to the metal, let's focus on
optimizing other matters, namely the ability to push feature-complete software
that works well enough, can be debugged and maintained, and can serve as a spec
for future implementations.

### On encapsulation

In order to create maintainable, forward-optimizable software, it is critical
to develop well-encapsulated objects that have well-understood properties, and
to re-use these easy-to-use-correctly components as building blocks for further
encapsulated meta-objects.

For example, mutexes are cheap enough for Tendermint's design goals when there
isn't goroutine contention, so it is encouraged to create concurrency-safe
structures with struct-level mutexes. If they are used in the context of
non-concurrent logic, then the performance is good enough. If they are used in
the context of concurrent logic, then they will still perform correctly.

Examples of this design principle can be seen in the types.ValidatorSet struct
and the rand.Rand struct. Each is a single struct declaration that can be used
in both concurrent and non-concurrent logic, and because it is well encapsulated,
it's easy to get the usage of the mutex right.

#### Example: rand.Rand

"The default Source is safe for concurrent use by multiple goroutines, but
Sources created by NewSource are not." The reason the default
package-level source is safe for concurrent use is that it is protected (see
`lockedSource` in https://golang.org/src/math/rand/rand.go).

But we shouldn't rely on the global source; we should be creating our own
Rand/Source instances and using them, especially for determinism in testing.
So it is reasonable to have rand.Rand be protected by a mutex. Whether we want
our own implementation of Rand is another question, but the answer there is
also in the affirmative. Sometimes you want to know where Rand is being used
in your code, so it becomes a simple matter of dropping in a log statement to
inject inspectability into Rand usage. Also, it is nice to be able to extend
the functionality of Rand with custom methods. For these reasons, and for the
reasons outlined in this design philosophy document, we should
continue to use the rand.Rand object, with mutex protection.

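A minimal sketch of such a mutex-protected wrapper (an illustration of the pattern only, not the actual Tendermint implementation):

```go
package rand

import (
	mrand "math/rand"
	"sync"
)

// Rand wraps math/rand.Rand with a struct-level mutex, so one instance is
// safe in both concurrent and non-concurrent logic.
type Rand struct {
	mtx  sync.Mutex
	rand *mrand.Rand
}

// NewRand returns a seeded, mutex-protected Rand; a fixed seed gives
// determinism in testing.
func NewRand(seed int64) *Rand {
	return &Rand{rand: mrand.New(mrand.NewSource(seed))}
}

// Int63 is an example custom method; a log statement could be dropped in
// here to inject inspectability into Rand usage.
func (r *Rand) Int63() int64 {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.rand.Int63()
}
```
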
Another key aspect of good encapsulation is the choice of exposed vs unexposed
methods. It should be clear to the reader of the code which methods are
intended to be used in what context, and what safe usage is. Part of this is
solved by hiding methods via unexported methods. Another part of this is
naming conventions on the methods (e.g. underscores) combined with good documentation
and code organization. If there are too many exposed methods and it isn't
clear which methods have what side effects, then there is something wrong with
the design of abstractions and it should be revisited.

### On concurrency

In order for Tendermint to remain relevant in the years to come, it is vital
for Tendermint to take advantage of multicore architectures. Due to the nature
of the problem, namely consensus across a concurrent p2p gossip network, and the need to
handle RPC requests for a large number of consuming subscribers, it is
unavoidable for Tendermint development to require expertise in concurrency
design, especially when it comes to the reactor design, and also for RPC
request handling.

## Guidelines

Here are some guidelines for designing for (sufficient) performance and concurrency:

* Mutex locks are cheap enough when there isn't contention.
* Do not optimize code without analytical or observed proof that it is in a hot path.
* Don't over-use channels when mutex locks with encapsulation are sufficient.
* The need to drain channels is often a hint of unconsidered edge cases.
* The creation of O(N) one-off goroutines is generally technical debt that
  needs to get addressed sooner rather than later. Avoid creating too many
  goroutines as a patch around incomplete concurrency design, or at least be
  aware of the debt and do not invest in the debt. On the other hand, Tendermint
  is designed to have a limited number of peers (e.g. 10 or 20), so the creation
  of O(C) goroutines per O(P) peers is still O(C\*P=constant).
* Use defer statements to unlock as much as possible. If you want to unlock sooner,
  try to create more modular functions that do make use of defer statements.

## Mantras

* Premature optimization kills
* Readability is paramount
* Beautiful is better than fast.
* In the face of ambiguity, refuse the temptation to guess.
* In the face of bugs, refuse the temptation to cover the bug.
* There should be one-- and preferably only one --obvious way to do it.
README.md (77 lines)
@@ -1,39 +1,39 @@
# Tendermint

![banner](docs/tendermint-core-image.jpg)

[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)), for short.
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.

[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://godoc.org/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.12.0-blue.svg)](https://github.com/moovweb/gvm)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://godoc.org/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.13-blue.svg)](https://github.com/moovweb/gvm)
[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)

Branch | Tests | Coverage
----------|-------|----------
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
| Branch | Tests | Coverage |
| ------ | ---------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------ |
| master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) |

Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.

For protocol details, see [the specification](/docs/spec).
For protocol details, see [the specification](https://github.com/tendermint/spec).

For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".

## A Note on Production Readiness
## Releases

While Tendermint is being used in production in private, permissioned
environments, we are still working actively to harden and audit it in preparation
for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/).
We are also still making breaking changes to the protocol and the APIs.
Thus, we tag the releases as *alpha software*.
NOTE: The master branch is now an active development branch (starting with `v0.32`). Please, do not depend on it and
use [releases](https://github.com/tendermint/tendermint/releases) instead.

Tendermint is being used in production in both private and public environments,
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).

In any case, if you intend to run Tendermint in production,
please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org).

@@ -47,13 +47,13 @@ For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.

## Minimum requirements

Requirement|Notes
---|---
Go version | Go1.11.4 or higher
| Requirement | Notes |
| ----------- | ------------------ |
| Go version | Go1.13 or higher |

## Documentation

Complete documentation can be found on the [website](https://tendermint.com/docs/).
Complete documentation can be found on the [website](https://docs.tendermint.com/master/).

### Install

@@ -74,12 +74,11 @@ and the [contributing guidelines](CONTRIBUTING.md) when submitting code.
Join the larger community on the [forum](https://forum.cosmos.network/) and the [chat](https://riot.im/app/#/room/#tendermint:matrix.org).

To learn more about the structure of the software, watch the [Developer
Sessions](https://www.youtube.com/playlist?list=PLdQIb0qr3pnBbG5ZG-0gr3zM86_s8Rpqv)
and read some [Architectural
Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
Sessions](/docs/DEV_SESSIONS.md) and read some [Architectural Decision
Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).

Learn more by reading the code and comparing it to the
[specification](https://github.com/tendermint/tendermint/tree/develop/docs/spec).
[specification](https://github.com/tendermint/spec).

## Versioning

@@ -96,6 +95,7 @@ include the in-process Go APIs.
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:

- crypto
- types
- rpc/client
- config

@@ -132,30 +132,31 @@ For details about the blockchain data structures and the p2p protocols, see the
[Tendermint specification](/docs/spec).

For details on using the software, see the [documentation](/docs/) which is also
hosted at: https://tendermint.com/docs/
hosted at: https://docs.tendermint.com/master/

### Tools

Benchmarking and monitoring is provided by `tm-bench` and `tm-monitor`, respectively.
Their code is found [here](/tools) and these binaries need to be built seperately.
Benchmarking is provided by `tm-load-test`.
The code for `tm-load-test` can be found [here](https://github.com/interchainio/tm-load-test) this binary needs to be built separately.
Additional documentation is found [here](/docs/tools).

### Sub-projects

* [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with
- [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with
  interfaces
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
- [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
- [Tm-db](http://github.com/tendermint/tm-db), Data Base abstractions to be used in applications.

### Applications

* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
* [Many more](https://tendermint.com/ecosystem)
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
- [Many more](https://tendermint.com/ecosystem)

### Research

* [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
* [Blog](https://blog.cosmos.network/tendermint/home)

- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
- [Original Whitepaper](https://github.com/tendermint/spec)
  - You can find the link at the bottom of the readme
- [Blog](https://blog.cosmos.network/tendermint/home)

ROADMAP.md

@@ -1,23 +0,0 @@
# Roadmap

BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence

FEATURES:
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Add authentication and rate-limiting to the RPC

IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
  - cache block parts for faster agreement after round changes
  - propagate block parts rarest first
- Better testing of the consensus state machine (ie. use a DSL)
- Auto compiled serialization/deserialization code instead of go-wire reflection

BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness

UPGRADING.md

@@ -3,6 +3,291 @@

This guide provides steps to be followed when you upgrade your applications to
a newer version of Tendermint Core.

## v0.33.0

This release is not compatible with previous blockchains: the commit now contains
only signatures, and several fields have been removed from the header.

### Config Changes

You will need to generate a new config if you have used a prior version of Tendermint.

- Tags have been renamed to events throughout the codebase, and their keys are called [compositeKeys](https://github.com/tendermint/tendermint/blob/6d05c531f7efef6f0619155cf10ae8557dd7832f/docs/app-dev/indexing-transactions.md).
- Evidence parameters have been changed to include a duration:
  - `consensus_params.evidence.max_age_duration`.
  - Renamed `consensus_params.evidence.max_age` to `max_age_num_blocks`.

### Go API

- `libs/common` has been removed in favor of specific packages:
  - `async`
  - `service`
  - `rand`
  - `net`
  - `strings`
  - `cmap`
- The `errors` package has also been removed.

### RPC Changes

- `/validators` is now paginated (default: 30 validators per page; see the sketch after this list)
- `/block_results` response format updated [see RPC docs for details](https://docs.tendermint.com/master/rpc/#/Info/block_results)
- The event suffix has been removed from the ID in event responses
- IDs are now integers, not `json-client-XYZ`

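For example, a hedged sketch of calling the now-paginated endpoint over plain HTTP; the `page`/`per_page` query parameters and the default local RPC address `127.0.0.1:26657` are assumptions about a locally running node, not part of this changelog:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Fetch the first page of the validator set (assumed parameter names;
	// per_page defaults to 30 per the release notes).
	resp, err := http.Get("http://127.0.0.1:26657/validators?page=1&per_page=30")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body)) // JSON-RPC response carrying one page of validators
}
```
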
## v0.32.0

This release is compatible with previous blockchains; however, the new ABCI
Events mechanism may create some complexity for nodes wishing to continue
operation with v0.32 from a previous version.
There are some minor breaking changes to the RPC.

### Config Changes

If you have `db_backend` set to `leveldb` in your config file, please change it
to `goleveldb` or `cleveldb`.

### RPC Changes

The default listen address for the RPC is now `127.0.0.1`. If you want to expose
it publicly, you have to explicitly configure it. Note exposing the RPC to the
public internet may not be safe - endpoints which return a lot of data may
enable resource exhaustion attacks on your node, causing the process to crash.

Any consumers of `/block_results` need to be mindful of the change in all field
names from CamelCase to snake_case, eg. `results.DeliverTx` is now `results.deliver_tx`.
This is a fix, but it's breaking.

### ABCI Changes

ABCI responses which previously had a `Tags` field now have an `Events` field
instead. The original `Tags` field was simply a list of key-value pairs, where
each key effectively represented some attribute of an event occurring in the
blockchain, like `sender`, `receiver`, or `amount`. However, it was difficult to
represent the occurrence of multiple events (for instance, multiple transfers) in a single list.
The new `Events` field contains a list of `Event`, where each `Event` is itself a list
of key-value pairs, allowing for more natural expression of multiple events in,
eg., a single DeliverTx or EndBlock. Note each `Event` also includes a `Type`, which is meant to categorize the
event.

For transaction indexing, the index key is
prefixed with the event type: `{eventType}.{attributeKey}`.
If the same event type and attribute key appear multiple times, the values are
appended in a list (see the sketch below).

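As a quick illustration (a hypothetical helper, not a Tendermint API), a composite key is simply the event type joined to the attribute key:

```go
package main

import "fmt"

// compositeKey mirrors the {eventType}.{attributeKey} indexing scheme.
func compositeKey(eventType, attrKey string) string {
	return fmt.Sprintf("%s.%s", eventType, attrKey)
}

func main() {
	fmt.Println(compositeKey("transfer", "recipient")) // transfer.recipient
}
```
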
To make queries, include the event type as a prefix. For instance, if you
previously queried for `recipient = 'XYZ'` and after the upgrade you name your event `transfer`,
the new query would be for `transfer.recipient = 'XYZ'`.

Note that transactions indexed on a node before upgrading to v0.32 will still be indexed
using the old scheme. For instance, if a node upgraded at height 100,
transactions before 100 would be queried with `recipient = 'XYZ'` and
transactions after 100 would be queried with `transfer.recipient = 'XYZ'`.
While this presents additional complexity to clients, it avoids the need to
reindex. Of course, you can reset the node and sync from scratch to re-index
entirely using the new scheme.

We illustrate further with a more complete example.

Prior to the update, suppose your `ResponseDeliverTx` looked like:

```go
abci.ResponseDeliverTx{
	Tags: []kv.Pair{
		{Key: []byte("sender"), Value: []byte("foo")},
		{Key: []byte("recipient"), Value: []byte("bar")},
		{Key: []byte("amount"), Value: []byte("35")},
	},
}
```

The following queries would match this transaction:

```go
query.MustParse("tm.event = 'Tx' AND sender = 'foo'")
query.MustParse("tm.event = 'Tx' AND recipient = 'bar'")
query.MustParse("tm.event = 'Tx' AND sender = 'foo' AND recipient = 'bar'")
```

Following the upgrade, your `ResponseDeliverTx` would contain the following `Events`:

```go
abci.ResponseDeliverTx{
	Events: []abci.Event{
		{
			Type: "transfer",
			Attributes: kv.Pairs{
				{Key: []byte("sender"), Value: []byte("foo")},
				{Key: []byte("recipient"), Value: []byte("bar")},
				{Key: []byte("amount"), Value: []byte("35")},
			},
		},
	},
}
```

Now the following queries would match this transaction:

```go
query.MustParse("tm.event = 'Tx' AND transfer.sender = 'foo'")
query.MustParse("tm.event = 'Tx' AND transfer.recipient = 'bar'")
query.MustParse("tm.event = 'Tx' AND transfer.sender = 'foo' AND transfer.recipient = 'bar'")
```

For further documentation on `Events`, see the [docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events).

### Go Applications

The ABCI Application interface changed slightly: the CheckTx and DeliverTx
methods now take Request structs. The contents of these structs are just the raw
tx bytes, which were previously passed in as the argument; a minimal sketch of
the new signatures follows.

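This sketch assumes the v0.33-era `abci/types` package; the method bodies are placeholders, not a real application:

```go
package main

import (
	"github.com/tendermint/tendermint/abci/types"
)

// App embeds BaseApplication and overrides only the two methods whose
// signatures changed: they now receive Request structs instead of raw tx bytes.
type App struct {
	types.BaseApplication
}

func (App) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	// The raw transaction bytes now live in req.Tx.
	if len(req.Tx) == 0 {
		return types.ResponseCheckTx{Code: 1, Log: "empty tx"}
	}
	return types.ResponseCheckTx{Code: 0}
}

func (App) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	_ = req.Tx // apply the transaction to application state here
	return types.ResponseDeliverTx{Code: 0}
}

func main() {
	var app types.Application = App{} // compile-time check of the interface
	_ = app                           // wire this into an ABCI server to serve requests
}
```
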

## v0.31.6

There are no breaking changes in this release, except in the Go API of the p2p and
mempool packages. However, if you're using cleveldb, you'll need to change
the compilation tag:

Use the `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB, or
use `make build_c` / `make install_c` (full instructions can be found at
https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support)

## v0.31.0

This release contains a breaking change to the behaviour of the pubsub system.
It also contains some minor breaking changes in the Go API and ABCI.
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
with blockchains created from the v0.30 series.

### RPC

The pubsub no longer blocks on publishing. This may cause some WebSocket (WS) clients to stop working as expected.
If your WS client is not consuming events fast enough, Tendermint can terminate the subscription.
In this case, the WS client will receive an error with description:

```json
{
	"jsonrpc": "2.0",
	"id": "{ID}#event",
	"error": {
		"code": -32000,
		"msg": "Server error",
		"data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)"
	}
}
```

Additionally, there are now limits on the number of subscribers and
subscriptions that can be active at once. See the new
`rpc.max_subscription_clients` and `rpc.max_subscriptions_per_client` values to
configure this.

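For example, a client-side sketch (hand-rolled types, not part of the Tendermint API) that recognizes this error by its code and the cancellation message in `data`:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// Minimal JSON-RPC error envelope matching the response shown above.
type rpcError struct {
	Code int    `json:"code"`
	Msg  string `json:"msg"`
	Data string `json:"data"`
}

type rpcResponse struct {
	JSONRPC string    `json:"jsonrpc"`
	ID      string    `json:"id"`
	Error   *rpcError `json:"error,omitempty"`
}

func main() {
	raw := `{"jsonrpc":"2.0","id":"0#event","error":{"code":-32000,"msg":"Server error","data":"subscription was cancelled (reason: client is not pulling messages fast enough)"}}`

	var resp rpcResponse
	if err := json.Unmarshal([]byte(raw), &resp); err != nil {
		panic(err)
	}
	// A slow consumer should resubscribe (and drain faster) when it sees this.
	if resp.Error != nil && strings.Contains(resp.Error.Data, "subscription was cancelled") {
		fmt.Println("subscription dropped:", resp.Error.Data)
	}
}
```
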
### Applications

Simple rename of `ConsensusParams.BlockSize` to `ConsensusParams.Block`.

The `ConsensusParams.Block.TimeIotaMS` field was also removed. It's configured
in the ConsensusParams in the genesis file.

### Go API

See the [CHANGELOG](CHANGELOG.md). These are relatively straightforward.

## v0.30.0

This release contains a breaking change to both the block and p2p protocols,
however it may be compatible with blockchains created with v0.29.0 depending on
the chain history. If your blockchain has not included any pieces of evidence,
or no piece of evidence has been included in more than one block,
and if your application has never returned multiple updates
for the same validator in a single block, then v0.30.0 will work fine with
blockchains created with v0.29.0.

The p2p protocol change is to fix the proposer selection algorithm again.
Note that proposer selection is purely a p2p concern right
now since the algorithm is only relevant during real time consensus.
This change is thus compatible with v0.29.0, but
all nodes must be upgraded to avoid disagreements on the proposer.

### Applications

Applications must ensure they do not return duplicates in
`ResponseEndBlock.ValidatorUpdates`. A pubkey must only appear once per set of
updates. Duplicates will cause irrecoverable failure. If you have a very good
reason why we shouldn't do this, please open an issue.

## v0.29.0

This release contains some breaking changes to the block and p2p protocols,
and will not be compatible with any previous versions of the software, primarily
due to changes in how various data structures are hashed.

Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:

- [Merkle tree](./docs/spec/blockchain/encoding.md#merkle-trees)
- [ConsensusParams](./docs/spec/blockchain/state.md#consensusparams)

There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:

- [Vote](https://github.com/tendermint/tendermint/blob/master/docs/spec/consensus/signing.md#votes)

Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress
specification](https://github.com/tendermint/tendermint/pull/3140).

For everything else, please see the [CHANGELOG](./CHANGELOG.md#v0.29.0).

## v0.28.0

This release breaks the format for the `priv_validator.json` file
and the protocol used for the external validator process.
It is compatible with v0.27.0 blockchains (neither the BlockProtocol nor the
P2PProtocol have changed).

Please read carefully for details about upgrading.

**Note:** Backup your `config/priv_validator.json`
before proceeding.

### `priv_validator.json`

The `config/priv_validator.json` is now two files:
`config/priv_validator_key.json` and `data/priv_validator_state.json`.
The former contains the key material, the latter contains the details on the last
message signed.

When running v0.28.0 for the first time, it will back up any pre-existing
`priv_validator.json` file and proceed to split it into the two new files.
Upgrading should happen automatically without problems.

To upgrade manually, use the provided `privValUpgrade.go` script, with exact paths for the old
`priv_validator.json` and the locations for the two new files. It's recommended
to use the default paths of `config/priv_validator_key.json` and
`data/priv_validator_state.json`, respectively:

```
go run scripts/privValUpgrade.go <old-path> <new-key-path> <new-state-path>
```

### External validator signers

The Unix and TCP implementations of the remote signing validator
have been consolidated into a single implementation.
Thus in both cases, the external process is expected to dial
Tendermint. This is different from how Unix sockets used to work, where
Tendermint dialed the external process.

The `PubKeyMsg` was also split into separate `Request` and `Response` types
for consistency with other messages.

Note that the TCP sockets don't yet use a persistent key,
so while they're encrypted, they can't yet be properly authenticated.
See [#3105](https://github.com/tendermint/tendermint/issues/3105).
Note the Unix socket has neither encryption nor authentication, but will
add a shared-secret in [#3099](https://github.com/tendermint/tendermint/issues/3099).

## v0.27.0

This release contains some breaking changes to the block and p2p protocols,

@@ -185,7 +470,6 @@ required to maintain a map from validator addresses to pubkeys since v0.23 (when
pubkeys were removed from RequestBeginBlock), but now they may need to track
multiple validator sets at once to accommodate this delay.

### Block Size

The `ConsensusParams.BlockSize.MaxTxs` was removed in favour of

Vagrantfile (vendored)

@@ -29,10 +29,14 @@ Vagrant.configure("2") do |config|
    usermod -a -G docker vagrant

    # install go
    wget -q https://dl.google.com/go/go1.11.linux-amd64.tar.gz
    tar -xvf go1.11.linux-amd64.tar.gz
    wget -q https://dl.google.com/go/go1.13.linux-amd64.tar.gz
    tar -xvf go1.13.linux-amd64.tar.gz
    mv go /usr/local
    rm -f go1.11.linux-amd64.tar.gz
    rm -f go1.13.linux-amd64.tar.gz

    # install nodejs (for docs)
    curl -sL https://deb.nodesource.com/setup_11.x | bash -
    apt-get install -y nodejs

    # cleanup
    apt-get autoremove -y

@@ -53,6 +57,6 @@ Vagrant.configure("2") do |config|

  # get all deps and tools, ready to install/test
  su - vagrant -c 'source /home/vagrant/.bash_profile'
  su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_dev_tools && make get_vendor_deps'
  su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools'
SHELL
end

@@ -8,7 +8,7 @@ can manage an application state running in another.

Previously, the ABCI was referred to as TMSP.

The community has provided a number of addtional implementations, see the [Tendermint Ecosystem](https://tendermint.com/ecosystem)
The community has provided a number of additional implementations, see the [Tendermint Ecosystem](https://github.com/tendermint/awesome#ecosystem)

## Installation & Usage

@@ -5,7 +5,7 @@ import (
	"sync"

	"github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/service"
)

const (

@@ -19,7 +19,7 @@ const (
// Note these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes and logs.
type Client interface {
	cmn.Service
	service.Service

	SetResponseCallback(Callback)
	Error() error

@@ -28,8 +28,8 @@ type Client interface {
	EchoAsync(msg string) *ReqRes
	InfoAsync(types.RequestInfo) *ReqRes
	SetOptionAsync(types.RequestSetOption) *ReqRes
	DeliverTxAsync(tx []byte) *ReqRes
	CheckTxAsync(tx []byte) *ReqRes
	DeliverTxAsync(types.RequestDeliverTx) *ReqRes
	CheckTxAsync(types.RequestCheckTx) *ReqRes
	QueryAsync(types.RequestQuery) *ReqRes
	CommitAsync() *ReqRes
	InitChainAsync(types.RequestInitChain) *ReqRes

@@ -40,8 +40,8 @@ type Client interface {
	EchoSync(msg string) (*types.ResponseEcho, error)
	InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
	SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
	DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error)
	CheckTxSync(tx []byte) (*types.ResponseCheckTx, error)
	DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
	CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
	QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
	CommitSync() (*types.ResponseCommit, error)
	InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)

@@ -60,7 +60,7 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err
	case "grpc":
		client = NewGRPCClient(addr, mustConnect)
	default:
		err = fmt.Errorf("Unknown abci transport %s", transport)
		err = fmt.Errorf("unknown abci transport %s", transport)
	}
	return
}

@@ -6,11 +6,12 @@ import (
	"sync"
	"time"

	context "golang.org/x/net/context"
	grpc "google.golang.org/grpc"
	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
)

var _ Client = (*grpcClient)(nil)

@@ -18,7 +19,7 @@ var _ Client = (*grpcClient)(nil)
// A stripped copy of the remoteClient that makes
// synchronous calls using grpc
type grpcClient struct {
	cmn.BaseService
	service.BaseService
	mustConnect bool

	client types.ABCIApplicationClient

@@ -35,12 +36,12 @@ func NewGRPCClient(addr string, mustConnect bool) *grpcClient {
		addr:        addr,
		mustConnect: mustConnect,
	}
	cli.BaseService = *cmn.NewBaseService(nil, "grpcClient", cli)
	cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli)
	return cli
}

func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
	return cmn.Connect(addr)
func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
	return tmnet.Connect(addr)
}

func (cli *grpcClient) OnStart() error {

@@ -49,7 +50,7 @@ func (cli *grpcClient) OnStart() error {
	}
RETRY_LOOP:
	for {
		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
		if err != nil {
			if cli.mustConnect {
				return err

@@ -65,7 +66,7 @@ RETRY_LOOP:

ENSURE_CONNECTED:
	for {
		_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
		_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
		if err == nil {
			break ENSURE_CONNECTED
		}

@@ -125,101 +126,101 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {

func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
	req := types.ToRequestEcho(msg)
	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
}

func (cli *grpcClient) FlushAsync() *ReqRes {
	req := types.ToRequestFlush()
	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true))
	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
}

func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
	req := types.ToRequestInfo(params)
	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true))
	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
}

func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
	req := types.ToRequestSetOption(params)
	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
}

func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
	req := types.ToRequestDeliverTx(tx)
	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true))
func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
	req := types.ToRequestDeliverTx(params)
	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}

func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
	req := types.ToRequestCheckTx(tx)
	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
	req := types.ToRequestCheckTx(params)
	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
}

func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
	req := types.ToRequestQuery(params)
	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true))
	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
}

func (cli *grpcClient) CommitAsync() *ReqRes {
	req := types.ToRequestCommit()
	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true))
	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
}

func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
	req := types.ToRequestInitChain(params)
	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true))
	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}

func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
	req := types.ToRequestBeginBlock(params)
	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true))
	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}

func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
	req := types.ToRequestEndBlock(params)
	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true))
	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}

func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {

@@ -228,18 +229,22 @@ func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response)
	reqres.Done()    // Release waiters
	reqres.SetDone() // so reqRes.SetCallback will run the callback

	// go routine for callbacks
	// goroutine for callbacks
	go func() {
		// Notify reqRes listener if set
		if cb := reqres.GetCallback(); cb != nil {
			cb(res)
		}
		cli.mtx.Lock()
		defer cli.mtx.Unlock()

		// Notify client listener if set
		if cli.resCb != nil {
			cli.resCb(reqres.Request, res)
		}

		// Notify reqRes listener if set
		if cb := reqres.GetCallback(); cb != nil {
			cb(res)
		}
	}()

	return reqres
}

@@ -265,13 +270,13 @@ func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.Respons
	return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *grpcClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
	reqres := cli.DeliverTxAsync(tx)
func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	reqres := cli.DeliverTxAsync(params)
	return reqres.Response.GetDeliverTx(), cli.Error()
}

func (cli *grpcClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
	reqres := cli.CheckTxAsync(tx)
func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	reqres := cli.CheckTxAsync(params)
	return reqres.Response.GetCheckTx(), cli.Error()
}

@@ -4,7 +4,7 @@ import (
	"sync"

	types "github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/service"
)

var _ Client = (*localClient)(nil)

@@ -14,7 +14,7 @@ var _ Client = (*localClient)(nil)
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
// RPC endpoint), but defers are used everywhere for the sake of consistency.
type localClient struct {
	cmn.BaseService
	service.BaseService

	mtx *sync.Mutex
	types.Application

@@ -29,7 +29,7 @@ func NewLocalClient(mtx *sync.Mutex, app types.Application) *localClient {
		mtx:         mtx,
		Application: app,
	}
	cli.BaseService = *cmn.NewBaseService(nil, "localClient", cli)
	cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
	return cli
}

@@ -81,24 +81,24 @@ func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
	)
}

func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes {
func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.DeliverTx(tx)
	res := app.Application.DeliverTx(params)
	return app.callback(
		types.ToRequestDeliverTx(tx),
		types.ToRequestDeliverTx(params),
		types.ToResponseDeliverTx(res),
	)
}

func (app *localClient) CheckTxAsync(tx []byte) *ReqRes {
func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.CheckTx(tx)
	res := app.Application.CheckTx(req)
	return app.callback(
		types.ToRequestCheckTx(tx),
		types.ToRequestCheckTx(req),
		types.ToResponseCheckTx(res),
	)
}

@@ -184,19 +184,19 @@ func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.Respon
	return &res, nil
}

func (app *localClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.DeliverTx(tx)
	res := app.Application.DeliverTx(req)
	return &res, nil
}

func (app *localClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	app.mtx.Lock()
	defer app.mtx.Unlock()

	res := app.Application.CheckTx(tx)
	res := app.Application.CheckTx(req)
	return &res, nil
}

@@ -5,13 +5,16 @@ import (
	"container/list"
	"errors"
	"fmt"
	"io"
	"net"
	"reflect"
	"sync"
	"time"

	"github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	tmnet "github.com/tendermint/tendermint/libs/net"
	"github.com/tendermint/tendermint/libs/service"
	"github.com/tendermint/tendermint/libs/timer"
)

const reqQueueSize = 256 // TODO make configurable

@@ -24,45 +27,42 @@ var _ Client = (*socketClient)(nil)
// the application in general is not meant to be interfaced
// with concurrent callers.
type socketClient struct {
	cmn.BaseService
	service.BaseService

	reqQueue   chan *ReqRes
	flushTimer *cmn.ThrottleTimer
	addr        string
	mustConnect bool
	conn        net.Conn

	reqQueue   chan *ReqRes
	flushTimer *timer.ThrottleTimer

	mtx     sync.Mutex
	addr    string
	conn    net.Conn
	err     error
	reqSent *list.List
	resCb   func(*types.Request, *types.Response) // listens to all callbacks
	reqSent *list.List                            // list of requests sent, waiting for response
	resCb   func(*types.Request, *types.Response) // called on all requests, if set.
}

func NewSocketClient(addr string, mustConnect bool) *socketClient {
	cli := &socketClient{
		reqQueue:   make(chan *ReqRes, reqQueueSize),
		flushTimer: cmn.NewThrottleTimer("socketClient", flushThrottleMS),
		flushTimer:  timer.NewThrottleTimer("socketClient", flushThrottleMS),
		mustConnect: mustConnect,

		addr:    addr,
		reqSent: list.New(),
		resCb:   nil,
	}
	cli.BaseService = *cmn.NewBaseService(nil, "socketClient", cli)
	cli.BaseService = *service.NewBaseService(nil, "socketClient", cli)
	return cli
}

func (cli *socketClient) OnStart() error {
	if err := cli.BaseService.OnStart(); err != nil {
		return err
	}

	var err error
	var conn net.Conn
RETRY_LOOP:
	for {
		conn, err = cmn.Connect(cli.addr)
		conn, err = tmnet.Connect(cli.addr)
		if err != nil {
			if cli.mustConnect {
				return err

@@ -81,14 +81,12 @@ RETRY_LOOP:
}

func (cli *socketClient) OnStop() {
	cli.BaseService.OnStop()

	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	if cli.conn != nil {
		cli.conn.Close()
	}

	cli.mtx.Lock()
	defer cli.mtx.Unlock()
	cli.flushQueue()
}

@@ -124,7 +122,7 @@ func (cli *socketClient) SetResponseCallback(resCb Callback) {

//----------------------------------------

func (cli *socketClient) sendRequestsRoutine(conn net.Conn) {
func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
	w := bufio.NewWriter(conn)
	for {

@@ -141,14 +139,14 @@ func (cli *socketClient) sendRequestsRoutine(conn net.Conn) {
			cli.willSendReq(reqres)
			err := types.WriteMessage(reqres.Request, w)
			if err != nil {
				cli.StopForError(fmt.Errorf("Error writing msg: %v", err))
				cli.StopForError(fmt.Errorf("error writing msg: %v", err))
				return
			}
			// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
			if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
				err = w.Flush()
				if err != nil {
					cli.StopForError(fmt.Errorf("Error flushing writer: %v", err))
					cli.StopForError(fmt.Errorf("error flushing writer: %v", err))
					return
				}
			}

@@ -156,7 +154,7 @@ func (cli *socketClient) sendRequestsRoutine(conn net.Conn) {
	}
}

func (cli *socketClient) recvResponseRoutine(conn net.Conn) {
func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
	r := bufio.NewReader(conn) // Buffer reads
	for {

@@ -195,11 +193,11 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
	// Get the first ReqRes
	next := cli.reqSent.Front()
	if next == nil {
		return fmt.Errorf("Unexpected result type %v when nothing expected", reflect.TypeOf(res.Value))
		return fmt.Errorf("unexpected result type %v when nothing expected", reflect.TypeOf(res.Value))
	}
	reqres := next.Value.(*ReqRes)
	if !resMatchesReq(reqres.Request, res) {
		return fmt.Errorf("Unexpected result type %v when response to %v expected",
		return fmt.Errorf("unexpected result type %v when response to %v expected",
			reflect.TypeOf(res.Value), reflect.TypeOf(reqres.Request.Value))
	}

@@ -207,16 +205,18 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
	reqres.Done()            // Release waiters
	cli.reqSent.Remove(next) // Pop first item from linked list

	// Notify reqRes listener if set
	if cb := reqres.GetCallback(); cb != nil {
		cb(res)
	}

	// Notify client listener if set
	// Notify client listener if set (global callback).
	if cli.resCb != nil {
		cli.resCb(reqres.Request, res)
	}

	// Notify reqRes listener if set (request specific callback).
	// NOTE: it is possible this callback isn't set on the reqres object.
	// at this point, in which case it will be called after, when it is set.
	if cb := reqres.GetCallback(); cb != nil {
		cb(res)
	}

	return nil
}

@@ -238,12 +238,12 @@ func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
	return cli.queueRequest(types.ToRequestSetOption(req))
}

func (cli *socketClient) DeliverTxAsync(tx []byte) *ReqRes {
	return cli.queueRequest(types.ToRequestDeliverTx(tx))
func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
	return cli.queueRequest(types.ToRequestDeliverTx(req))
}

func (cli *socketClient) CheckTxAsync(tx []byte) *ReqRes {
	return cli.queueRequest(types.ToRequestCheckTx(tx))
func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
	return cli.queueRequest(types.ToRequestCheckTx(req))
}

func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {

@@ -295,14 +295,14 @@ func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.Respo
	return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *socketClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
	reqres := cli.queueRequest(types.ToRequestDeliverTx(tx))
func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
	reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
	cli.FlushSync()
	return reqres.Response.GetDeliverTx(), cli.Error()
}

func (cli *socketClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
	reqres := cli.queueRequest(types.ToRequestCheckTx(tx))
func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
	reqres := cli.queueRequest(types.ToRequestCheckTx(req))
	cli.FlushSync()
	return reqres.Response.GetCheckTx(), cli.Error()
}

@@ -12,7 +12,8 @@ import (
	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/libs/service"
)

func TestSocketClientStopForErrorDeadlock(t *testing.T) {

@@ -94,9 +95,9 @@ func TestHangingSyncCalls(t *testing.T) {
}

func setupClientServer(t *testing.T, app types.Application) (
	cmn.Service, abcicli.Client) {
	service.Service, abcicli.Client) {
	// some port between 20k and 30k
	port := 20000 + cmn.RandInt32()%10000
	port := 20000 + tmrand.Int32()%10000
	addr := fmt.Sprintf("localhost:%d", port)

	s, err := server.NewServer(addr, "socket", app)

@@ -11,8 +11,8 @@ import (
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
@@ -111,16 +111,28 @@ func Execute() error {
|
||||
}
|
||||
|
||||
func addGlobalFlags() {
|
||||
RootCmd.PersistentFlags().StringVarP(&flagAddress, "address", "", "tcp://0.0.0.0:26658", "address of application socket")
|
||||
RootCmd.PersistentFlags().StringVarP(&flagAddress,
|
||||
"address",
|
||||
"",
|
||||
"tcp://0.0.0.0:26658",
|
||||
"address of application socket")
|
||||
RootCmd.PersistentFlags().StringVarP(&flagAbci, "abci", "", "socket", "either socket or grpc")
|
||||
RootCmd.PersistentFlags().BoolVarP(&flagVerbose, "verbose", "v", false, "print the command and results as if it were a console session")
|
||||
RootCmd.PersistentFlags().BoolVarP(&flagVerbose,
|
||||
"verbose",
|
||||
"v",
|
||||
false,
|
||||
"print the command and results as if it were a console session")
|
||||
RootCmd.PersistentFlags().StringVarP(&flagLogLevel, "log_level", "", "debug", "set the logger level")
|
||||
}
|
||||
|
||||
func addQueryFlags() {
|
||||
queryCmd.PersistentFlags().StringVarP(&flagPath, "path", "", "/store", "path to prefix query with")
|
||||
queryCmd.PersistentFlags().IntVarP(&flagHeight, "height", "", 0, "height to query the blockchain at")
|
||||
queryCmd.PersistentFlags().BoolVarP(&flagProve, "prove", "", false, "whether or not to return a merkle proof of the query result")
|
||||
queryCmd.PersistentFlags().BoolVarP(&flagProve,
|
||||
"prove",
|
||||
"",
|
||||
false,
|
||||
"whether or not to return a merkle proof of the query result")
|
||||
}
|
||||
|
||||
func addCounterFlags() {
|
||||
@@ -174,9 +186,7 @@ where example.file looks something like:
|
||||
info
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdBatch(cmd, args)
|
||||
},
|
||||
RunE: cmdBatch,
|
||||
}
|
||||
|
||||
var consoleCmd = &cobra.Command{
|
||||
@@ -189,9 +199,7 @@ without opening a new connection each time
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdConsole(cmd, args)
|
||||
},
|
||||
RunE: cmdConsole,
|
||||
}
|
||||
|
||||
var echoCmd = &cobra.Command{
|
||||
@@ -199,27 +207,21 @@ var echoCmd = &cobra.Command{
|
||||
Short: "have the application echo a message",
|
||||
Long: "have the application echo a message",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdEcho(cmd, args)
|
||||
},
|
||||
RunE: cmdEcho,
|
||||
}
|
||||
var infoCmd = &cobra.Command{
|
||||
Use: "info",
|
||||
Short: "get some info about the application",
|
||||
Long: "get some info about the application",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdInfo(cmd, args)
|
||||
},
|
||||
RunE: cmdInfo,
|
||||
}
|
||||
var setOptionCmd = &cobra.Command{
|
||||
Use: "set_option",
|
||||
Short: "set an option on the application",
|
||||
Long: "set an option on the application",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdSetOption(cmd, args)
|
||||
},
|
||||
RunE: cmdSetOption,
|
||||
}
|
||||
|
||||
var deliverTxCmd = &cobra.Command{
|
||||
@@ -227,9 +229,7 @@ var deliverTxCmd = &cobra.Command{
|
||||
Short: "deliver a new transaction to the application",
|
||||
Long: "deliver a new transaction to the application",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdDeliverTx(cmd, args)
|
||||
},
|
||||
RunE: cmdDeliverTx,
|
||||
}
|
||||
|
||||
var checkTxCmd = &cobra.Command{
|
||||
@@ -237,9 +237,7 @@ var checkTxCmd = &cobra.Command{
|
||||
Short: "validate a transaction",
|
||||
Long: "validate a transaction",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdCheckTx(cmd, args)
|
||||
},
|
||||
RunE: cmdCheckTx,
|
||||
}
|
||||
|
||||
var commitCmd = &cobra.Command{
|
||||
@@ -247,9 +245,7 @@ var commitCmd = &cobra.Command{
|
||||
Short: "commit the application state and return the Merkle root hash",
|
||||
Long: "commit the application state and return the Merkle root hash",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdCommit(cmd, args)
|
||||
},
|
||||
RunE: cmdCommit,
|
||||
}
|
||||
|
||||
var versionCmd = &cobra.Command{
|
||||
@@ -268,9 +264,7 @@ var queryCmd = &cobra.Command{
|
||||
Short: "query the application state",
|
||||
Long: "query the application state",
|
||||
Args: cobra.ExactArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdQuery(cmd, args)
|
||||
},
|
||||
RunE: cmdQuery,
|
||||
}
|
||||
|
||||
var counterCmd = &cobra.Command{
|
||||
@@ -278,9 +272,7 @@ var counterCmd = &cobra.Command{
|
||||
Short: "ABCI demo example",
|
||||
Long: "ABCI demo example",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdCounter(cmd, args)
|
||||
},
|
||||
RunE: cmdCounter,
|
||||
}
|
||||
|
||||
var kvstoreCmd = &cobra.Command{
|
||||
@@ -288,9 +280,7 @@ var kvstoreCmd = &cobra.Command{
|
||||
Short: "ABCI demo example",
|
||||
Long: "ABCI demo example",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdKVStore(cmd, args)
|
||||
},
|
||||
RunE: cmdKVStore,
|
||||
}
|
||||
|
||||
var testCmd = &cobra.Command{
|
||||
@@ -298,9 +288,7 @@ var testCmd = &cobra.Command{
|
||||
Short: "run integration tests",
|
||||
Long: "run integration tests",
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
return cmdTest(cmd, args)
|
||||
},
|
||||
RunE: cmdTest,
|
||||
}
|
||||
|
||||
// Generates new Args array based off of previous call args to maintain flag persistence
|
||||
@@ -322,14 +310,14 @@ func persistentArgs(line []byte) []string {
|
||||
func compose(fs []func() error) error {
|
||||
if len(fs) == 0 {
|
||||
return nil
|
||||
} else {
|
||||
err := fs[0]()
|
||||
if err == nil {
|
||||
return compose(fs[1:])
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := fs[0]()
|
||||
if err == nil {
|
||||
return compose(fs[1:])
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func cmdTest(cmd *cobra.Command, args []string) error {
|
||||
@@ -356,16 +344,18 @@ func cmdTest(cmd *cobra.Command, args []string) error {
|
||||
|
||||
func cmdBatch(cmd *cobra.Command, args []string) error {
|
||||
bufReader := bufio.NewReader(os.Stdin)
|
||||
LOOP:
|
||||
for {
|
||||
|
||||
line, more, err := bufReader.ReadLine()
|
||||
if more {
|
||||
return errors.New("Input line is too long")
|
||||
} else if err == io.EOF {
|
||||
break
|
||||
} else if len(line) == 0 {
|
||||
switch {
|
||||
case more:
|
||||
return errors.New("input line is too long")
|
||||
case err == io.EOF:
|
||||
break LOOP
|
||||
case len(line) == 0:
|
||||
continue
|
||||
} else if err != nil {
|
||||
case err != nil:
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -384,7 +374,7 @@ func cmdConsole(cmd *cobra.Command, args []string) error {
|
||||
bufReader := bufio.NewReader(os.Stdin)
|
||||
line, more, err := bufReader.ReadLine()
|
||||
if more {
|
||||
return errors.New("Input is too long")
|
||||
return errors.New("input is too long")
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -394,7 +384,6 @@ func cmdConsole(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
@@ -420,7 +409,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
}
|
||||
|
||||
// otherwise, we need to skip the next one too
|
||||
i += 1
|
||||
i++
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -547,7 +536,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.DeliverTxSync(txBytes)
|
||||
res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -573,7 +562,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.CheckTxSync(txBytes)
|
||||
res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -637,9 +626,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
}
|
||||
|
||||
func cmdCounter(cmd *cobra.Command, args []string) error {
|
||||
|
||||
app := counter.NewCounterApplication(flagSerial)
|
||||
|
||||
app := counter.NewApplication(flagSerial)
|
||||
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
|
||||
|
||||
// Start the listener
|
||||
@@ -652,12 +639,14 @@ func cmdCounter(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait forever
|
||||
cmn.TrapSignal(func() {
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(logger, func() {
|
||||
// Cleanup
|
||||
srv.Stop()
|
||||
})
|
||||
return nil
|
||||
|
||||
// Run forever.
|
||||
select {}
|
||||
}
|
||||
|
||||
func cmdKVStore(cmd *cobra.Command, args []string) error {
|
||||
@@ -666,7 +655,7 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
 	// Create the application - in memory or persisted to disk
 	var app types.Application
 	if flagPersist == "" {
-		app = kvstore.NewKVStoreApplication()
+		app = kvstore.NewApplication()
 	} else {
 		app = kvstore.NewPersistentKVStoreApplication(flagPersist)
 		app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
@@ -682,12 +671,14 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
 		return err
 	}
 
-	// Wait forever
-	cmn.TrapSignal(func() {
+	// Stop upon receiving SIGTERM or CTRL-C.
+	tmos.TrapSignal(logger, func() {
 		// Cleanup
 		srv.Stop()
 	})
-	return nil
+
+	// Run forever.
+	select {}
 }
 
 //--------------------------------------------------------------------------------
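The same replacement appears in both commands: the old `cmn.TrapSignal`, which blocked internally, gives way to `tmos.TrapSignal(logger, ...)` plus an explicit `select {}` to keep the process alive. A sketch of the resulting shutdown pattern, assuming `tmos` aliases the new `libs/os` package and that its handler exits the process after running the callback (which the removal of `return nil` suggests); the socket address is illustrative:

```go
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

srv, err := server.NewServer("unix://example.sock", "socket", kvstore.NewApplication())
if err != nil {
	return err
}
if err := srv.Start(); err != nil {
	return err
}

// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
	srv.Stop() // cleanup runs inside the signal handler
})

// Run forever; the signal handler terminates the process.
select {}
```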
@@ -739,14 +730,14 @@ func stringOrHexToBytes(s string) ([]byte, error) {
 	if len(s) > 2 && strings.ToLower(s[:2]) == "0x" {
 		b, err := hex.DecodeString(s[2:])
 		if err != nil {
-			err = fmt.Errorf("Error decoding hex argument: %s", err.Error())
+			err = fmt.Errorf("error decoding hex argument: %s", err.Error())
 			return nil, err
 		}
 		return b, nil
 	}
 
 	if !strings.HasPrefix(s, "\"") || !strings.HasSuffix(s, "\"") {
-		err := fmt.Errorf("Invalid string arg: \"%s\". Must be quoted or a \"0x\"-prefixed hex string", s)
+		err := fmt.Errorf("invalid string arg: \"%s\". Must be quoted or a \"0x\"-prefixed hex string", s)
 		return nil, err
 	}
 

@@ -8,7 +8,7 @@ import (
 	"github.com/tendermint/tendermint/abci/types"
 )
 
-type CounterApplication struct {
+type Application struct {
 	types.BaseApplication
 
 	hashCount int
@@ -16,15 +16,15 @@ type CounterApplication struct {
 	serial bool
 }
 
-func NewCounterApplication(serial bool) *CounterApplication {
-	return &CounterApplication{serial: serial}
+func NewApplication(serial bool) *Application {
+	return &Application{serial: serial}
 }
 
-func (app *CounterApplication) Info(req types.RequestInfo) types.ResponseInfo {
+func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
 	return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
 }
 
-func (app *CounterApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
+func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption {
 	key, value := req.Key, req.Value
 	if key == "serial" && value == "on" {
 		app.serial = true
@@ -42,15 +42,15 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo
 	return types.ResponseSetOption{}
 }
 
-func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
+func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
 	if app.serial {
-		if len(tx) > 8 {
+		if len(req.Tx) > 8 {
 			return types.ResponseDeliverTx{
 				Code: code.CodeTypeEncodingError,
-				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
+				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
 		}
 		tx8 := make([]byte, 8)
-		copy(tx8[len(tx8)-len(tx):], tx)
+		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
 		txValue := binary.BigEndian.Uint64(tx8)
 		if txValue != uint64(app.txCount) {
 			return types.ResponseDeliverTx{
@@ -62,15 +62,15 @@ func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
 	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
 }
 
-func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx {
+func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
 	if app.serial {
-		if len(tx) > 8 {
+		if len(req.Tx) > 8 {
 			return types.ResponseCheckTx{
 				Code: code.CodeTypeEncodingError,
-				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
+				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
 		}
 		tx8 := make([]byte, 8)
-		copy(tx8[len(tx8)-len(tx):], tx)
+		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
 		txValue := binary.BigEndian.Uint64(tx8)
 		if txValue < uint64(app.txCount) {
 			return types.ResponseCheckTx{
@@ -81,7 +81,7 @@ func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx {
 	return types.ResponseCheckTx{Code: code.CodeTypeOK}
 }
 
-func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
+func (app *Application) Commit() (resp types.ResponseCommit) {
 	app.hashCount++
 	if app.txCount == 0 {
 		return types.ResponseCommit{}
@@ -91,7 +91,7 @@ func (app *CounterApplication) Commit() (resp types.ResponseCommit) {
 	return types.ResponseCommit{Data: hash}
 }
 
-func (app *CounterApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
+func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
 	switch reqQuery.Path {
 	case "hash":
 		return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}

@@ -13,8 +13,8 @@ import (
 
 	"golang.org/x/net/context"
 
-	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
+	tmnet "github.com/tendermint/tendermint/libs/net"
 
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/code"
@@ -25,7 +25,7 @@ import (
 
 func TestKVStore(t *testing.T) {
 	fmt.Println("### Testing KVStore")
-	testStream(t, kvstore.NewKVStoreApplication())
+	testStream(t, kvstore.NewApplication())
 }
 
 func TestBaseApp(t *testing.T) {
@@ -87,7 +87,7 @@ func testStream(t *testing.T, app types.Application) {
 	// Write requests
 	for counter := 0; counter < numDeliverTxs; counter++ {
 		// Send request
-		reqRes := client.DeliverTxAsync([]byte("test"))
+		reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")})
 		_ = reqRes
 		// check err ?
 
@@ -107,11 +107,11 @@ func testStream(t *testing.T, app types.Application) {
 //-------------------------
 // test grpc
 
-func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
-	return cmn.Connect(addr)
+func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
+	return tmnet.Connect(addr)
 }
 
-func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
+func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
 	numDeliverTxs := 2000
 
 	// Start the listener
@@ -123,7 +123,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
 	defer server.Stop()
 
 	// Connect to the socket
-	conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
+	conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
 	if err != nil {
 		t.Fatalf("Error dialing GRPC server: %v", err.Error())
 	}
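The change tracks gRPC-Go's deprecation of `grpc.WithDialer` in favor of `grpc.WithContextDialer`, which drops the explicit timeout argument: the deadline now arrives through the context. A minimal conforming dialer, using only the standard library and `google.golang.org/grpc` (the address is illustrative):

```go
func dialer(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	return d.DialContext(ctx, "unix", addr) // the deadline comes from ctx now
}

// usage:
conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithContextDialer(dialer))
```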
@@ -22,10 +22,10 @@ and the Handshake allows any necessary blocks to be replayed.
 Validator set changes are effected using the following transaction format:
 
 ```
-val:pubkey1/power1,addr2/power2,addr3/power3"
+"val:pubkey1!power1,pubkey2!power2,pubkey3!power3"
 ```
 
-where `power1` is the new voting power for the validator with `pubkey1` (possibly a new one).
+where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
+To remove a validator from the validator set, set power to `0`.
 There is no sybil protection against new validators joining.
-Validators can be removed by setting their power to `0`.

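Under the new format, building a validator-change transaction means base64-encoding the 32-byte key and joining key and power with `!`, as `MakeValSetChangeTx` below does. A sketch using only the standard library; the key bytes are placeholders:

```go
pub := make([]byte, 32) // placeholder for a real ed25519 public key
pubStr := base64.StdEncoding.EncodeToString(pub)

// power 10 adds or updates the validator; power 0 removes it
tx := []byte(fmt.Sprintf("val:%s!%d", pubStr, 10))
```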
@@ -2,14 +2,14 @@ package kvstore
 
 import (
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	tmrand "github.com/tendermint/tendermint/libs/rand"
 )
 
 // RandVal creates one random validator, with a key derived
 // from the input value
 func RandVal(i int) types.ValidatorUpdate {
-	pubkey := cmn.RandBytes(32)
-	power := cmn.RandUint16() + 1
+	pubkey := tmrand.Bytes(32)
+	power := tmrand.Uint16() + 1
 	v := types.Ed25519ValidatorUpdate(pubkey, int64(power))
 	return v
 }

@@ -8,9 +8,9 @@ import (
 
 	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
-	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/libs/kv"
 	"github.com/tendermint/tendermint/version"
+	dbm "github.com/tendermint/tm-db"
 )
 
 var (
@@ -28,15 +28,19 @@ type State struct {
 }
 
 func loadState(db dbm.DB) State {
-	stateBytes := db.Get(stateKey)
 	var state State
-	if len(stateBytes) != 0 {
-		err := json.Unmarshal(stateBytes, &state)
-		if err != nil {
-			panic(err)
-		}
-	}
 	state.db = db
+	stateBytes, err := db.Get(stateKey)
+	if err != nil {
+		panic(err)
+	}
+	if len(stateBytes) == 0 {
+		return state
+	}
+	err = json.Unmarshal(stateBytes, &state)
+	if err != nil {
+		panic(err)
+	}
 	return state
 }

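The rewrite follows the move to `github.com/tendermint/tm-db`, whose `Get` returns `(value, error)` rather than a bare value, so every read now has an explicit error path. A hedged sketch of the new access pattern against an in-memory DB (key and value are illustrative):

```go
db := dbm.NewMemDB()
db.Set([]byte("stateKey"), []byte(`{"size":1}`))

stateBytes, err := db.Get([]byte("stateKey"))
if err != nil {
	panic(err) // the kvstore example treats DB errors as fatal
}
if len(stateBytes) == 0 {
	// key absent: fall back to a zero-valued state
}
```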
@@ -54,81 +58,99 @@ func prefixKey(key []byte) []byte {
 
 //---------------------------------------------------
 
-var _ types.Application = (*KVStoreApplication)(nil)
+var _ types.Application = (*Application)(nil)
 
-type KVStoreApplication struct {
+type Application struct {
 	types.BaseApplication
 
 	state State
 }
 
-func NewKVStoreApplication() *KVStoreApplication {
+func NewApplication() *Application {
 	state := loadState(dbm.NewMemDB())
-	return &KVStoreApplication{state: state}
+	return &Application{state: state}
 }
 
-func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
+func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
 	return types.ResponseInfo{
-		Data:       fmt.Sprintf("{\"size\":%v}", app.state.Size),
-		Version:    version.ABCIVersion,
-		AppVersion: ProtocolVersion.Uint64(),
+		Data:             fmt.Sprintf("{\"size\":%v}", app.state.Size),
+		Version:          version.ABCIVersion,
+		AppVersion:       ProtocolVersion.Uint64(),
+		LastBlockHeight:  app.state.Height,
+		LastBlockAppHash: app.state.AppHash,
 	}
 }
 
 // tx is either "key=value" or just arbitrary bytes
-func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
+func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
 	var key, value []byte
-	parts := bytes.Split(tx, []byte("="))
+	parts := bytes.Split(req.Tx, []byte("="))
 	if len(parts) == 2 {
 		key, value = parts[0], parts[1]
 	} else {
-		key, value = tx, tx
+		key, value = req.Tx, req.Tx
 	}
-	app.state.db.Set(prefixKey(key), value)
-	app.state.Size += 1
 
-	tags := []cmn.KVPair{
-		{Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")},
-		{Key: []byte("app.key"), Value: key},
+	app.state.db.Set(prefixKey(key), value)
+	app.state.Size++
+
+	events := []types.Event{
+		{
+			Type: "app",
+			Attributes: []kv.Pair{
+				{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")},
+				{Key: []byte("key"), Value: key},
+			},
+		},
 	}
-	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
+
+	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
 }
 
-func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
+func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
 	return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
 }
 
-func (app *KVStoreApplication) Commit() types.ResponseCommit {
+func (app *Application) Commit() types.ResponseCommit {
 	// Using a memdb - just return the big endian size of the db
 	appHash := make([]byte, 8)
 	binary.PutVarint(appHash, app.state.Size)
 	app.state.AppHash = appHash
-	app.state.Height += 1
+	app.state.Height++
 	saveState(app.state)
 	return types.ResponseCommit{Data: appHash}
 }
 
-func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+// Returns an associated value or nil if missing.
+func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
 	if reqQuery.Prove {
-		value := app.state.db.Get(prefixKey(reqQuery.Data))
+		value, err := app.state.db.Get(prefixKey(reqQuery.Data))
+		if err != nil {
+			panic(err)
+		}
+		if value == nil {
+			resQuery.Log = "does not exist"
+		} else {
+			resQuery.Log = "exists"
+		}
 		resQuery.Index = -1 // TODO make Proof return index
 		resQuery.Key = reqQuery.Data
 		resQuery.Value = value
-		if value != nil {
-			resQuery.Log = "exists"
-		} else {
-			resQuery.Log = "does not exist"
-		}
 		return
-	} else {
-		resQuery.Key = reqQuery.Data
-		value := app.state.db.Get(prefixKey(reqQuery.Data))
-		resQuery.Value = value
-		if value != nil {
-			resQuery.Log = "exists"
-		} else {
-			resQuery.Log = "does not exist"
-		}
-
-		return
 	}
+
+	resQuery.Key = reqQuery.Data
+	value, err := app.state.db.Get(prefixKey(reqQuery.Data))
+	if err != nil {
+		panic(err)
+	}
+	if value == nil {
+		resQuery.Log = "does not exist"
+	} else {
+		resQuery.Log = "exists"
+	}
+	resQuery.Value = value
+
+	return resQuery
 }

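The notable API shift above: flat `Tags` (`[]cmn.KVPair`) become structured `Events`, each carrying a type string and its own attribute list. A minimal sketch of emitting one event from a DeliverTx handler under the new types, mirroring the diff's own identifiers:

```go
events := []types.Event{
	{
		Type: "app",
		Attributes: []kv.Pair{
			{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")},
			{Key: []byte("key"), Value: []byte("some-key")},
		},
	},
}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
```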
@@ -9,8 +9,8 @@ import (
 
 	"github.com/stretchr/testify/require"
 
-	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/libs/service"
 
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/code"
@@ -18,11 +18,17 @@ import (
 	"github.com/tendermint/tendermint/abci/types"
 )
 
+const (
+	testKey   = "abc"
+	testValue = "def"
+)
+
 func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
-	ar := app.DeliverTx(tx)
+	req := types.RequestDeliverTx{Tx: tx}
+	ar := app.DeliverTx(req)
 	require.False(t, ar.IsErr(), ar)
 	// repeating tx doesn't raise error
-	ar = app.DeliverTx(tx)
+	ar = app.DeliverTx(req)
 	require.False(t, ar.IsErr(), ar)
 
 	// make sure query is fine
@@ -44,13 +50,13 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri
 }
 
 func TestKVStoreKV(t *testing.T) {
-	kvstore := NewKVStoreApplication()
-	key := "abc"
+	kvstore := NewApplication()
+	key := testKey
 	value := key
 	tx := []byte(key)
 	testKVStore(t, kvstore, tx, key, value)
 
-	value = "def"
+	value = testValue
 	tx = []byte(key + "=" + value)
 	testKVStore(t, kvstore, tx, key, value)
 }
@@ -61,12 +67,12 @@ func TestPersistentKVStoreKV(t *testing.T) {
 		t.Fatal(err)
 	}
 	kvstore := NewPersistentKVStoreApplication(dir)
-	key := "abc"
+	key := testKey
 	value := key
 	tx := []byte(key)
 	testKVStore(t, kvstore, tx, key, value)
 
-	value = "def"
+	value = testValue
 	tx = []byte(key + "=" + value)
 	testKVStore(t, kvstore, tx, key, value)
 }
@@ -89,7 +95,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
 	height = int64(1)
 	hash := []byte("foo")
 	header := types.Header{
-		Height: int64(height),
+		Height: height,
 	}
 	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
 	kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
@@ -147,7 +153,7 @@ func TestValUpdates(t *testing.T) {
 
 	makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)
 
-	vals1 = append(vals[:nInit-2], vals[nInit+1])
+	vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
 	vals2 = kvstore.Validators()
 	valsEqual(t, vals1, vals2)
 
@@ -169,7 +175,12 @@ func TestValUpdates(t *testing.T) {
 
 }
 
-func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff []types.ValidatorUpdate, txs ...[]byte) {
+func makeApplyBlock(
+	t *testing.T,
+	kvstore types.Application,
+	heightInt int,
+	diff []types.ValidatorUpdate,
+	txs ...[]byte) {
 	// make and apply block
 	height := int64(heightInt)
 	hash := []byte("foo")
@@ -179,7 +190,7 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff
 
 	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
 	for _, tx := range txs {
-		if r := kvstore.DeliverTx(tx); r.IsErr() {
+		if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
 			t.Fatal(r)
 		}
 	}
@@ -206,7 +217,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
 	}
 }
 
-func makeSocketClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
+func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
 	// Start the listener
 	socket := fmt.Sprintf("unix://%s.sock", name)
 	logger := log.TestingLogger()
@@ -228,7 +239,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client,
 	return client, server, nil
 }
 
-func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, cmn.Service, error) {
+func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
 	// Start the listener
 	socket := fmt.Sprintf("unix://%s.sock", name)
 	logger := log.TestingLogger()
@@ -251,7 +262,7 @@ func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, c
 
 func TestClientServer(t *testing.T) {
 	// set up socket app
-	kvstore := NewKVStoreApplication()
+	kvstore := NewApplication()
 	client, server, err := makeSocketClientServer(kvstore, "kvstore-socket")
 	require.Nil(t, err)
 	defer server.Stop()
@@ -260,7 +271,7 @@ func TestClientServer(t *testing.T) {
 	runClientTests(t, client)
 
 	// set up grpc app
-	kvstore = NewKVStoreApplication()
+	kvstore = NewApplication()
 	gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
 	require.Nil(t, err)
 	defer gserver.Stop()
@@ -271,22 +282,22 @@ func TestClientServer(t *testing.T) {
 
 func runClientTests(t *testing.T, client abcicli.Client) {
 	// run some tests....
-	key := "abc"
+	key := testKey
 	value := key
 	tx := []byte(key)
 	testClient(t, client, tx, key, value)
 
-	value = "def"
+	value = testValue
 	tx = []byte(key + "=" + value)
 	testClient(t, client, tx, key, value)
 }
 
 func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
-	ar, err := app.DeliverTxSync(tx)
+	ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
 	require.NoError(t, err)
 	require.False(t, ar.IsErr(), ar)
 	// repeating tx doesn't raise error
-	ar, err = app.DeliverTxSync(tx)
+	ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
 	require.NoError(t, err)
 	require.False(t, ar.IsErr(), ar)
 

@@ -2,15 +2,17 @@ package kvstore
 
 import (
 	"bytes"
-	"encoding/hex"
+	"encoding/base64"
 	"fmt"
 	"strconv"
 	"strings"
 
 	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/types"
-	dbm "github.com/tendermint/tendermint/libs/db"
+	"github.com/tendermint/tendermint/crypto/ed25519"
 	"github.com/tendermint/tendermint/libs/log"
+	tmtypes "github.com/tendermint/tendermint/types"
+	dbm "github.com/tendermint/tm-db"
 )

 const (
@@ -22,11 +24,13 @@ const (
 var _ types.Application = (*PersistentKVStoreApplication)(nil)
 
 type PersistentKVStoreApplication struct {
-	app *KVStoreApplication
+	app *Application
 
 	// validator set
 	ValUpdates []types.ValidatorUpdate
 
+	valAddrToPubKeyMap map[string]types.PubKey
+
 	logger log.Logger
 }

@@ -40,8 +44,9 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
 	state := loadState(db)
 
 	return &PersistentKVStoreApplication{
-		app:    &KVStoreApplication{state: state},
-		logger: log.NewNopLogger(),
+		app:                &Application{state: state},
+		valAddrToPubKeyMap: make(map[string]types.PubKey),
+		logger:             log.NewNopLogger(),
 	}
 }

@@ -60,22 +65,22 @@ func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) t
 	return app.app.SetOption(req)
 }
 
-// tx is either "val:pubkey/power" or "key=value" or just arbitrary bytes
-func (app *PersistentKVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
+// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
+func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
 	// if it starts with "val:", update the validator set
-	// format is "val:pubkey/power"
-	if isValidatorTx(tx) {
+	// format is "val:pubkey!power"
+	if isValidatorTx(req.Tx) {
 		// update validators in the merkle tree
 		// and in app.ValUpdates
-		return app.execValidatorTx(tx)
+		return app.execValidatorTx(req.Tx)
 	}
 
 	// otherwise, update the key-value store
-	return app.app.DeliverTx(tx)
+	return app.app.DeliverTx(req)
 }
 
-func (app *PersistentKVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
-	return app.app.CheckTx(tx)
+func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
+	return app.app.CheckTx(req)
 }
 
 // Commit will panic if InitChain was not called
@@ -83,8 +88,23 @@ func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
 	return app.app.Commit()
 }
 
-func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
-	return app.app.Query(reqQuery)
+// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
+// For any other path, returns an associated value or nil if missing.
+func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+	switch reqQuery.Path {
+	case "/val":
+		key := []byte("val:" + string(reqQuery.Data))
+		value, err := app.app.state.db.Get(key)
+		if err != nil {
+			panic(err)
+		}
+
+		resQuery.Key = reqQuery.Data
+		resQuery.Value = value
+		return
+	default:
+		return app.app.Query(reqQuery)
+	}
 }
 
 // Save the validators in the merkle tree
@@ -102,6 +122,19 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
 func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
 	// reset valset changes
 	app.ValUpdates = make([]types.ValidatorUpdate, 0)
+
+	for _, ev := range req.ByzantineValidators {
+		if ev.Type == tmtypes.ABCIEvidenceTypeDuplicateVote {
+			// decrease voting power by 1
+			if ev.TotalVotingPower == 0 {
+				continue
+			}
+			app.updateValidator(types.ValidatorUpdate{
+				PubKey: app.valAddrToPubKeyMap[string(ev.Validator.Address)],
+				Power:  ev.TotalVotingPower - 1,
+			})
+		}
+	}
 	return types.ResponseBeginBlock{}
 }

@@ -114,7 +147,10 @@ func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) typ
 // update validators
 
 func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
-	itr := app.app.state.db.Iterator(nil, nil)
+	itr, err := app.app.state.db.Iterator(nil, nil)
+	if err != nil {
+		panic(err)
+	}
 	for ; itr.Valid(); itr.Next() {
 		if isValidatorTx(itr.Key()) {
 			validator := new(types.ValidatorUpdate)
@@ -129,33 +165,34 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
 }
 
 func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
-	return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
+	pubStr := base64.StdEncoding.EncodeToString(pubkey.Data)
+	return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
 }
 
 func isValidatorTx(tx []byte) bool {
 	return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
 }
 
-// format is "val:pubkey/power"
-// pubkey is raw 32-byte ed25519 key
+// format is "val:pubkey!power"
+// pubkey is a base64-encoded 32-byte ed25519 key
 func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
 	tx = tx[len(ValidatorSetChangePrefix):]
 
 	//get the pubkey and power
-	pubKeyAndPower := strings.Split(string(tx), "/")
+	pubKeyAndPower := strings.Split(string(tx), "!")
 	if len(pubKeyAndPower) != 2 {
 		return types.ResponseDeliverTx{
 			Code: code.CodeTypeEncodingError,
-			Log:  fmt.Sprintf("Expected 'pubkey/power'. Got %v", pubKeyAndPower)}
+			Log:  fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
 	}
 	pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]
 
 	// decode the pubkey
-	pubkey, err := hex.DecodeString(pubkeyS)
+	pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
 	if err != nil {
 		return types.ResponseDeliverTx{
 			Code: code.CodeTypeEncodingError,
-			Log:  fmt.Sprintf("Pubkey (%s) is invalid hex", pubkeyS)}
+			Log:  fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
 	}
 
 	// decode the power
@@ -167,20 +204,30 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
 	}
 
 	// update
-	return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power)))
+	return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, power))
 }
 
 // add, update, or remove a validator
 func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
 	key := []byte("val:" + string(v.PubKey.Data))
+
+	pubkey := ed25519.PubKeyEd25519{}
+	copy(pubkey[:], v.PubKey.Data)
+
 	if v.Power == 0 {
 		// remove validator
-		if !app.app.state.db.Has(key) {
+		hasKey, err := app.app.state.db.Has(key)
+		if err != nil {
+			panic(err)
+		}
+		if !hasKey {
+			pubStr := base64.StdEncoding.EncodeToString(v.PubKey.Data)
 			return types.ResponseDeliverTx{
 				Code: code.CodeTypeUnauthorized,
-				Log:  fmt.Sprintf("Cannot remove non-existent validator %X", key)}
+				Log:  fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
 		}
 		app.app.state.db.Delete(key)
+		delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
 	} else {
 		// add or update validator
 		value := bytes.NewBuffer(make([]byte, 0))
@@ -190,6 +237,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
 				Log: fmt.Sprintf("Error encoding validator: %v", err)}
 		}
 		app.app.state.db.Set(key, value.Bytes())
+		app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
 	}
 
 	// we only update the changes array if we successfully updated the tree

@@ -6,11 +6,12 @@ import (
 	"google.golang.org/grpc"
 
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	tmnet "github.com/tendermint/tendermint/libs/net"
+	"github.com/tendermint/tendermint/libs/service"
 )
 
 type GRPCServer struct {
-	cmn.BaseService
+	service.BaseService
 
 	proto string
 	addr  string
@@ -21,15 +22,15 @@ type GRPCServer struct {
 }
 
 // NewGRPCServer returns a new gRPC ABCI server
-func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) cmn.Service {
-	proto, addr := cmn.ProtocolAndAddress(protoAddr)
+func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service {
+	proto, addr := tmnet.ProtocolAndAddress(protoAddr)
 	s := &GRPCServer{
 		proto:    proto,
 		addr:     addr,
 		listener: nil,
 		app:      app,
 	}
-	s.BaseService = *cmn.NewBaseService(nil, "ABCIServer", s)
+	s.BaseService = *service.NewBaseService(nil, "ABCIServer", s)
 	return s
 }

@@ -6,18 +6,17 @@ It contains two server implementation:
  * socket server
 
 */
-
 package server
 
 import (
 	"fmt"
 
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	"github.com/tendermint/tendermint/libs/service"
 )
 
-func NewServer(protoAddr, transport string, app types.Application) (cmn.Service, error) {
-	var s cmn.Service
+func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) {
+	var s service.Service
 	var err error
 	switch transport {
 	case "socket":
@@ -25,7 +24,7 @@ func NewServer(protoAddr, transport string, app types.Application) (cmn.Service,
 	case "grpc":
 		s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
 	default:
-		err = fmt.Errorf("Unknown server type %s", transport)
+		err = fmt.Errorf("unknown server type %s", transport)
 	}
 	return s, err
 }
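`NewServer` keeps the same two transports; only its return type changes to `service.Service`. A sketch of selecting a transport with the new signature (the address and the use of the example kvstore app are illustrative):

```go
app := kvstore.NewApplication()

// "socket" or "grpc"; anything else yields the lowercased error above
srv, err := server.NewServer("tcp://127.0.0.1:26658", "socket", app)
if err != nil {
	log.Fatalf("error creating server: %v", err)
}
if err := srv.Start(); err != nil {
	log.Fatalf("error starting server: %v", err)
}
defer srv.Stop()
```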
@@ -8,13 +8,14 @@ import (
 	"sync"
 
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	tmnet "github.com/tendermint/tendermint/libs/net"
+	"github.com/tendermint/tendermint/libs/service"
 )
 
 // var maxNumberConnections = 2
 
 type SocketServer struct {
-	cmn.BaseService
+	service.BaseService
 
 	proto string
 	addr  string
@@ -28,8 +29,8 @@ type SocketServer struct {
 	app types.Application
 }
 
-func NewSocketServer(protoAddr string, app types.Application) cmn.Service {
-	proto, addr := cmn.ProtocolAndAddress(protoAddr)
+func NewSocketServer(protoAddr string, app types.Application) service.Service {
+	proto, addr := tmnet.ProtocolAndAddress(protoAddr)
 	s := &SocketServer{
 		proto: proto,
 		addr:  addr,
@@ -37,7 +38,7 @@ func NewSocketServer(protoAddr string, app types.Application) cmn.Service {
 		app:   app,
 		conns: make(map[int]net.Conn),
 	}
-	s.BaseService = *cmn.NewBaseService(nil, "ABCIServer", s)
+	s.BaseService = *service.NewBaseService(nil, "ABCIServer", s)
 	return s
 }

@@ -88,7 +89,7 @@ func (s *SocketServer) rmConn(connID int) error {
 
 	conn, ok := s.conns[connID]
 	if !ok {
-		return fmt.Errorf("Connection %d does not exist", connID)
+		return fmt.Errorf("connection %d does not exist", connID)
 	}
 
 	delete(s.conns, connID)
@@ -127,11 +128,12 @@ func (s *SocketServer) acceptConnectionsRoutine() {
 
 func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
 	err := <-closeConn
-	if err == io.EOF {
+	switch {
+	case err == io.EOF:
 		s.Logger.Error("Connection was closed by client")
-	} else if err != nil {
+	case err != nil:
 		s.Logger.Error("Connection error", "error", err)
-	} else {
+	default:
 		// never happens
 		s.Logger.Error("Connection was closed.")
 	}
@@ -143,9 +145,19 @@ func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
 }
 
 // Read requests from conn and deal with them
-func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, responses chan<- *types.Response) {
+func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
 	var count int
 	var bufReader = bufio.NewReader(conn)
+
+	defer func() {
+		// make sure to recover from any app-related panics to allow proper socket cleanup
+		r := recover()
+		if r != nil {
+			closeConn <- fmt.Errorf("recovered from panic: %v", r)
+			s.appMtx.Unlock()
+		}
+	}()
+
 	for {
 
 		var req = &types.Request{}
@@ -154,7 +166,7 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, respo
 		if err == io.EOF {
 			closeConn <- err
 		} else {
-			closeConn <- fmt.Errorf("Error reading message: %v", err.Error())
+			closeConn <- fmt.Errorf("error reading message: %v", err)
 		}
 		return
 	}
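The new deferred `recover` keeps an application panic from tearing down the socket without cleanup. The shape of the pattern in isolation; the channel name mirrors the diff, everything else is illustrative:

```go
func handle(closeConn chan error) {
	defer func() {
		if r := recover(); r != nil {
			// convert the panic into an error the connection-teardown path understands
			closeConn <- fmt.Errorf("recovered from panic: %v", r)
		}
	}()

	panic("app misbehaved") // stands in for a panicking ABCI handler
}
```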
@@ -178,10 +190,10 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
 		res := s.app.SetOption(*r.SetOption)
 		responses <- types.ToResponseSetOption(res)
 	case *types.Request_DeliverTx:
-		res := s.app.DeliverTx(r.DeliverTx.Tx)
+		res := s.app.DeliverTx(*r.DeliverTx)
 		responses <- types.ToResponseDeliverTx(res)
 	case *types.Request_CheckTx:
-		res := s.app.CheckTx(r.CheckTx.Tx)
+		res := s.app.CheckTx(*r.CheckTx)
 		responses <- types.ToResponseCheckTx(res)
 	case *types.Request_Commit:
 		res := s.app.Commit()
@@ -204,20 +216,20 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
 }
 
 // Pull responses from 'responses' and write them to conn.
-func (s *SocketServer) handleResponses(closeConn chan error, conn net.Conn, responses <-chan *types.Response) {
+func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) {
 	var count int
 	var bufWriter = bufio.NewWriter(conn)
 	for {
 		var res = <-responses
 		err := types.WriteMessage(res, bufWriter)
 		if err != nil {
-			closeConn <- fmt.Errorf("Error writing message: %v", err.Error())
+			closeConn <- fmt.Errorf("error writing message: %v", err.Error())
 			return
 		}
 		if _, ok := res.Value.(*types.Response_Flush); ok {
 			err = bufWriter.Flush()
 			if err != nil {
-				closeConn <- fmt.Errorf("Error flushing write buffer: %v", err.Error())
+				closeConn <- fmt.Errorf("error flushing write buffer: %v", err.Error())
 				return
 			}
 		}

@@ -6,12 +6,12 @@ import (
 	"log"
 
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	tmnet "github.com/tendermint/tendermint/libs/net"
 )
 
 func main() {
 
-	conn, err := cmn.Connect("unix://test.sock")
+	conn, err := tmnet.Connect("unix://test.sock")
 	if err != nil {
 		log.Fatal(err.Error())
 	}

@@ -3,17 +3,17 @@ package main
 import (
 	"bufio"
 	"fmt"
+	"io"
 	"log"
-	"net"
 	"reflect"
 
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	tmnet "github.com/tendermint/tendermint/libs/net"
 )
 
 func main() {
 
-	conn, err := cmn.Connect("unix://test.sock")
+	conn, err := tmnet.Connect("unix://test.sock")
	if err != nil {
 		log.Fatal(err.Error())
 	}
@@ -33,7 +33,7 @@ func main() {
 	}
 }
 
-func makeRequest(conn net.Conn, req *types.Request) (*types.Response, error) {
+func makeRequest(conn io.ReadWriter, req *types.Request) (*types.Response, error) {
 	var bufWriter = bufio.NewWriter(conn)
 
 	// Write desired request
@@ -62,7 +62,7 @@ func makeRequest(conn net.Conn, req *types.Request) (*types.Response, error) {
 		return nil, err
 	}
 	if _, ok := resFlush.Value.(*types.Response_Flush); !ok {
-		return nil, fmt.Errorf("Expected flush response but got something else: %v", reflect.TypeOf(resFlush))
+		return nil, fmt.Errorf("expected flush response but got something else: %v", reflect.TypeOf(resFlush))
 	}
 
 	return res, nil

@@ -13,7 +13,7 @@ import (
 func TestClientServerNoAddrPrefix(t *testing.T) {
 	addr := "localhost:26658"
 	transport := "socket"
-	app := kvstore.NewKVStoreApplication()
+	app := kvstore.NewApplication()
 
 	server, err := abciserver.NewServer(addr, transport, app)
 	assert.NoError(t, err, "expected no error on NewServer")

@@ -7,15 +7,15 @@ import (
 
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/types"
-	cmn "github.com/tendermint/tendermint/libs/common"
+	tmrand "github.com/tendermint/tendermint/libs/rand"
 )
 
 func InitChain(client abcicli.Client) error {
 	total := 10
 	vals := make([]types.ValidatorUpdate, total)
 	for i := 0; i < total; i++ {
-		pubkey := cmn.RandBytes(33)
-		power := cmn.RandInt()
+		pubkey := tmrand.Bytes(33)
+		power := tmrand.Int()
 		vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power))
 	}
 	_, err := client.InitChainSync(types.RequestInitChain{
@@ -51,45 +51,45 @@ func Commit(client abcicli.Client, hashExp []byte) error {
 	if !bytes.Equal(data, hashExp) {
 		fmt.Println("Failed test: Commit")
 		fmt.Printf("Commit hash was unexpected. Got %X expected %X\n", data, hashExp)
-		return errors.New("CommitTx failed")
+		return errors.New("commitTx failed")
 	}
 	fmt.Println("Passed test: Commit")
 	return nil
 }
 
 func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
-	res, _ := client.DeliverTxSync(txBytes)
+	res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
 	code, data, log := res.Code, res.Data, res.Log
 	if code != codeExp {
 		fmt.Println("Failed test: DeliverTx")
 		fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
 			code, codeExp, log)
-		return errors.New("DeliverTx error")
+		return errors.New("deliverTx error")
 	}
 	if !bytes.Equal(data, dataExp) {
 		fmt.Println("Failed test: DeliverTx")
 		fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
 			data, dataExp)
-		return errors.New("DeliverTx error")
+		return errors.New("deliverTx error")
 	}
 	fmt.Println("Passed test: DeliverTx")
 	return nil
 }
 
 func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
-	res, _ := client.CheckTxSync(txBytes)
+	res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
 	code, data, log := res.Code, res.Data, res.Log
 	if code != codeExp {
 		fmt.Println("Failed test: CheckTx")
 		fmt.Printf("CheckTx response code was unexpected. Got %v expected %v. Log: %v\n",
 			code, codeExp, log)
-		return errors.New("CheckTx")
+		return errors.New("checkTx")
 	}
 	if !bytes.Equal(data, dataExp) {
 		fmt.Println("Failed test: CheckTx")
 		fmt.Printf("CheckTx response data was unexpected. Got %X expected %X\n",
 			data, dataExp)
-		return errors.New("CheckTx")
+		return errors.New("checkTx")
 	}
 	fmt.Println("Passed test: CheckTx")
 	return nil

@@ -43,7 +43,7 @@ func commit(client abcicli.Client, hashExp []byte) {
 }
 
 func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
-	res, err := client.DeliverTxSync(txBytes)
+	res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
 	if err != nil {
 		panicf("client error: %v", err)
 	}

@@ -36,7 +36,7 @@ func ensureABCIIsUp(typ string, n int) error {
 	}
 
 	for i := 0; i < n; i++ {
-		cmd := exec.Command("bash", "-c", cmdString) // nolint: gas
+		cmd := exec.Command("bash", "-c", cmdString)
 		_, err = cmd.CombinedOutput()
 		if err == nil {
 			break
@@ -53,7 +53,7 @@ func testCounter() {
 	}
 
 	fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
-	cmd := exec.Command("bash", "-c", fmt.Sprintf("abci-cli %s", abciApp)) // nolint: gas
+	cmd := exec.Command("bash", "-c", fmt.Sprintf("abci-cli %s", abciApp))
 	cmd.Stdout = os.Stdout
 	if err := cmd.Start(); err != nil {
 		log.Fatalf("starting %q err: %v", abciApp, err)

@@ -4,6 +4,7 @@ set -e
 # These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it
 
 # Get the directory of where this script is.
+export PATH="$GOBIN:$PATH"
 SOURCE="${BASH_SOURCE[0]}"
 while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
 DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
@@ -13,13 +14,13 @@ cd "$DIR"
 
 echo "RUN COUNTER OVER SOCKET"
 # test golang counter
-ABCI_APP="counter" go run ./*.go
+ABCI_APP="counter" go run -mod=readonly ./*.go
 echo "----------------------"
 
 
 echo "RUN COUNTER OVER GRPC"
 # test golang counter via grpc
-ABCI_APP="counter --abci=grpc" ABCI="grpc" go run ./*.go
+ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
 echo "----------------------"
 
 # test nodejs counter

@@ -2,6 +2,7 @@
 set -e
 
 # Get the root directory.
+export PATH="$GOBIN:$PATH"
 SOURCE="${BASH_SOURCE[0]}"
 while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
 DIR="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )"

@@ -15,12 +15,12 @@ type Application interface {
 	Query(RequestQuery) ResponseQuery // Query for state
 
 	// Mempool Connection
-	CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool
+	CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
 
 	// Consensus Connection
-	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
+	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
 	BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
-	DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing
+	DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
 	EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
 	Commit() ResponseCommit // Commit the state and return the application Merkle root hash
 }
@@ -45,11 +45,11 @@ func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
 	return ResponseSetOption{}
 }
 
-func (BaseApplication) DeliverTx(tx []byte) ResponseDeliverTx {
+func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
 	return ResponseDeliverTx{Code: CodeTypeOK}
 }
 
-func (BaseApplication) CheckTx(tx []byte) ResponseCheckTx {
+func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
 	return ResponseCheckTx{Code: CodeTypeOK}
 }

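With the interface change, every mempool and consensus handler takes a request struct; an application embeds `types.BaseApplication` and overrides only what it needs. A minimal implementation against the new signatures (`NoopApp` is illustrative, not part of the changeset):

```go
// NoopApp is an illustrative application, not part of the changeset.
type NoopApp struct {
	types.BaseApplication
}

func (NoopApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	_ = req.Tx // the raw bytes now travel inside the request struct
	return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}

func (NoopApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	return types.ResponseCheckTx{Code: types.CodeTypeOK}
}

var _ types.Application = (*NoopApp)(nil) // compile-time interface check
```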
@@ -103,12 +103,12 @@ func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption
 }
 
 func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
-	res := app.app.DeliverTx(req.Tx)
+	res := app.app.DeliverTx(*req)
 	return &res, nil
 }
 
 func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
-	res := app.app.CheckTx(req.Tx)
+	res := app.app.CheckTx(*req)
 	return &res, nil
 }

@@ -93,15 +93,15 @@ func ToRequestSetOption(req RequestSetOption) *Request {
 	}
 }
 
-func ToRequestDeliverTx(tx []byte) *Request {
+func ToRequestDeliverTx(req RequestDeliverTx) *Request {
 	return &Request{
-		Value: &Request_DeliverTx{&RequestDeliverTx{Tx: tx}},
+		Value: &Request_DeliverTx{&req},
 	}
 }
 
-func ToRequestCheckTx(tx []byte) *Request {
+func ToRequestCheckTx(req RequestCheckTx) *Request {
 	return &Request{
-		Value: &Request_CheckTx{&RequestCheckTx{Tx: tx}},
+		Value: &Request_CheckTx{&req},
 	}
 }

@@ -8,21 +8,26 @@ import (
 
 	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/assert"
-	cmn "github.com/tendermint/tendermint/libs/common"
+
+	"github.com/tendermint/tendermint/libs/kv"
 )
 
 func TestMarshalJSON(t *testing.T) {
 	b, err := json.Marshal(&ResponseDeliverTx{})
 	assert.Nil(t, err)
-	// Do not include empty fields.
-	assert.False(t, strings.Contains(string(b), "code"))
+
+	// include empty fields.
+	assert.True(t, strings.Contains(string(b), "code"))
 	r1 := ResponseCheckTx{
 		Code:      1,
 		Data:      []byte("hello"),
 		GasWanted: 43,
-		Tags: []cmn.KVPair{
-			{Key: []byte("pho"), Value: []byte("bo")},
+		Events: []Event{
+			{
+				Type: "testEvent",
+				Attributes: []kv.Pair{
+					{Key: []byte("pho"), Value: []byte("bo")},
+				},
+			},
 		},
 	}
 	b, err = json.Marshal(&r1)
@@ -57,7 +62,8 @@ func TestWriteReadMessageSimple(t *testing.T) {
 func TestWriteReadMessage(t *testing.T) {
 	cases := []proto.Message{
 		&Header{
-			NumTxs:  4,
+			Height:  4,
 			ChainID: "test",
 		},
 		// TODO: add the rest
 	}
@@ -82,8 +88,13 @@ func TestWriteReadMessage2(t *testing.T) {
 		Data:      []byte(phrase),
 		Log:       phrase,
 		GasWanted: 10,
-		Tags: []cmn.KVPair{
-			cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
+		Events: []Event{
+			{
+				Type: "testEvent",
+				Attributes: []kv.Pair{
+					{Key: []byte("abc"), Value: []byte("def")},
+				},
+			},
 		},
 	},
 	// TODO: add the rest

@@ -40,7 +40,7 @@ func main() {
 	}
 	if writeImportTime && !wroteImport {
 		wroteImport = true
-		fmt.Fprintf(outFile, "import \"github.com/tendermint/go-wire/data\"\n")
+		fmt.Fprintf(outFile, "import \"github.com/tendermint/go-amino/data\"\n")
 
 	}
 	if gotPackageLine {

@@ -42,14 +42,12 @@ func (r ResponseQuery) IsErr() bool {
 }
 
 //---------------------------------------------------------------------------
-// override JSON marshalling so we dont emit defaults (ie. disable omitempty)
-// note we need Unmarshal functions too because protobuf had the bright idea
-// to marshal int64->string. cool. cool, cool, cool: https://developers.google.com/protocol-buffers/docs/proto3#json
+// override JSON marshalling so we emit defaults (ie. disable omitempty)
 
 var (
 	jsonpbMarshaller = jsonpb.Marshaler{
 		EnumsAsInts:  true,
-		EmitDefaults: false,
+		EmitDefaults: true,
 	}
 	jsonpbUnmarshaller = jsonpb.Unmarshaler{}
 )

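With `EmitDefaults` flipped to true, zero-valued fields are serialized rather than omitted, which is what the `TestMarshalJSON` assertion change above checks. A small sketch of the marshaller's effect, using the gogo `jsonpb` package the diff configures:

```go
m := jsonpb.Marshaler{
	EnumsAsInts:  true,
	EmitDefaults: true, // zero values such as code=0 now appear in the JSON
}
s, err := m.MarshalToString(&types.ResponseDeliverTx{})
if err != nil {
	panic(err)
}
fmt.Println(s) // contains "code", where the old config produced {}
```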
(One file's diff suppressed because it is too large.)
@@ -1,12 +1,14 @@
 syntax = "proto3";
-package types;
+package tendermint.abci.types;
 option go_package = "github.com/tendermint/tendermint/abci/types";
 
 // For more information on gogo.proto, see:
 // https://github.com/gogo/protobuf/blob/master/extensions.md
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
-import "google/protobuf/timestamp.proto";
-import "github.com/tendermint/tendermint/libs/common/types.proto";
 import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto";
+import "github.com/tendermint/tendermint/libs/kv/types.proto";
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
 
 // This file is copied from http://github.com/tendermint/abci
 // NOTE: When using custom types, mind the warnings.
@@ -81,8 +83,14 @@ message RequestBeginBlock {
   repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false];
 }
 
+enum CheckTxType {
+  New = 0;
+  Recheck = 1;
+}
+
 message RequestCheckTx {
   bytes tx = 1;
+  CheckTxType type = 2;
 }
 
 message RequestDeliverTx {
@@ -159,13 +167,13 @@ message ResponseQuery {
   int64 index = 5;
   bytes key = 6;
   bytes value = 7;
-  merkle.Proof proof = 8;
+  tendermint.crypto.merkle.Proof proof = 8;
   int64 height = 9;
   string codespace = 10;
 }
 
 message ResponseBeginBlock {
-  repeated common.KVPair tags = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+  repeated Event events = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"];
 }
 
 message ResponseCheckTx {
@@ -175,7 +183,7 @@ message ResponseCheckTx {
   string info = 4; // nondeterministic
   int64 gas_wanted = 5;
   int64 gas_used = 6;
-  repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+  repeated Event events = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"];
   string codespace = 8;
 }
 
@@ -186,14 +194,14 @@ message ResponseDeliverTx {
   string info = 4; // nondeterministic
   int64 gas_wanted = 5;
   int64 gas_used = 6;
-  repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+  repeated Event events = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"];
   string codespace = 8;
 }
 
 message ResponseEndBlock {
   repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
   ConsensusParams consensus_param_updates = 2;
-  repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
+  repeated Event events = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"];
 }
 
 message ResponseCommit {
@@ -207,23 +215,23 @@ message ResponseCommit {
 // ConsensusParams contains all consensus-relevant parameters
 // that can be adjusted by the abci app
 message ConsensusParams {
-  BlockSizeParams block_size = 1;
+  BlockParams block = 1;
   EvidenceParams evidence = 2;
   ValidatorParams validator = 3;
 }
 
-// BlockSize contains limits on the block size.
-message BlockSizeParams {
+// BlockParams contains limits on the block size.
+message BlockParams {
   // Note: must be greater than 0
   int64 max_bytes = 1;
   // Note: must be greater or equal to -1
   int64 max_gas = 2;
 }
 
 // EvidenceParams contains limits on the evidence.
 message EvidenceParams {
   // Note: must be greater than 0
-  int64 max_age = 1;
+  int64 max_age_num_blocks = 1;
+  google.protobuf.Duration max_age_duration = 2 [(gogoproto.nullable)=false, (gogoproto.stdduration)=true];
 }
 
 // ValidatorParams contains limits on validators.
@@ -236,6 +244,11 @@ message LastCommitInfo {
   repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
 }
 
+message Event {
+  string type = 1;
+  repeated tendermint.libs.kv.Pair attributes = 2 [(gogoproto.nullable)=false, (gogoproto.jsontag)="attributes,omitempty"];
+}
+
 //----------------------------------------
 // Blockchain Types
 
@@ -245,26 +258,24 @@ message Header {
   string chain_id = 2 [(gogoproto.customname)="ChainID"];
   int64 height = 3;
   google.protobuf.Timestamp time = 4 [(gogoproto.nullable)=false, (gogoproto.stdtime)=true];
-  int64 num_txs = 5;
-  int64 total_txs = 6;
 
   // prev block info
-  BlockID last_block_id = 7 [(gogoproto.nullable)=false];
+  BlockID last_block_id = 5 [(gogoproto.nullable)=false];
 
   // hashes of block data
-  bytes last_commit_hash = 8; // commit from validators from the last block
-  bytes data_hash = 9;        // transactions
+  bytes last_commit_hash = 6; // commit from validators from the last block
+  bytes data_hash = 7;        // transactions
 
   // hashes from the app output from the prev block
-  bytes validators_hash = 10;   // validators for the current block
-  bytes next_validators_hash = 11; // validators for the next block
-  bytes consensus_hash = 12;    // consensus params for current block
-  bytes app_hash = 13;          // state after txs from the previous block
-  bytes last_results_hash = 14; // root hash of all results from the txs from the previous block
+  bytes validators_hash = 8;   // validators for the current block
+  bytes next_validators_hash = 9; // validators for the next block
+  bytes consensus_hash = 10;   // consensus params for current block
+  bytes app_hash = 11;         // state after txs from the previous block
+  bytes last_results_hash = 12; // root hash of all results from the txs from the previous block
 
   // consensus info
-  bytes evidence_hash = 15;    // evidence included in the block
-  bytes proposer_address = 16; // original proposer of the block
+  bytes evidence_hash = 13;    // evidence included in the block
+  bytes proposer_address = 14; // original proposer of the block
 }
 
 message Version {

@@ -3,19 +3,22 @@
 
 package types
 
-import testing "testing"
-import math_rand "math/rand"
-import time "time"
-import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
-import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
-import proto "github.com/gogo/protobuf/proto"
-import golang_proto "github.com/golang/protobuf/proto"
-import fmt "fmt"
-import math "math"
-import _ "github.com/gogo/protobuf/gogoproto"
-import _ "github.com/golang/protobuf/ptypes/timestamp"
-import _ "github.com/tendermint/tendermint/crypto/merkle"
-import _ "github.com/tendermint/tendermint/libs/common"
+import (
+	fmt "fmt"
+	_ "github.com/gogo/protobuf/gogoproto"
+	github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
+	github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+	proto "github.com/gogo/protobuf/proto"
+	golang_proto "github.com/golang/protobuf/proto"
+	_ "github.com/golang/protobuf/ptypes/duration"
+	_ "github.com/golang/protobuf/ptypes/timestamp"
+	_ "github.com/tendermint/tendermint/crypto/merkle"
+	_ "github.com/tendermint/tendermint/libs/kv"
+	math "math"
+	math_rand "math/rand"
+	testing "testing"
+	time "time"
+)
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -1479,15 +1482,15 @@ func TestConsensusParamsMarshalTo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockSizeParamsProto(t *testing.T) {
|
||||
func TestBlockParamsProto(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedBlockSizeParams(popr, false)
|
||||
p := NewPopulatedBlockParams(popr, false)
|
||||
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
msg := &BlockSizeParams{}
|
||||
msg := &BlockParams{}
|
||||
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
@@ -1510,10 +1513,10 @@ func TestBlockSizeParamsProto(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockSizeParamsMarshalTo(t *testing.T) {
|
||||
func TestBlockParamsMarshalTo(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedBlockSizeParams(popr, false)
|
||||
p := NewPopulatedBlockParams(popr, false)
|
||||
size := p.Size()
|
||||
dAtA := make([]byte, size)
|
||||
for i := range dAtA {
|
||||
@@ -1523,7 +1526,7 @@ func TestBlockSizeParamsMarshalTo(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
msg := &BlockSizeParams{}
|
||||
msg := &BlockParams{}
|
||||
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
@@ -1703,6 +1706,62 @@ func TestLastCommitInfoMarshalTo(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventProto(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedEvent(popr, false)
|
||||
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
msg := &Event{}
|
||||
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
littlefuzz := make([]byte, len(dAtA))
|
||||
copy(littlefuzz, dAtA)
|
||||
for i := range dAtA {
|
||||
dAtA[i] = byte(popr.Intn(256))
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
if len(littlefuzz) > 0 {
|
||||
fuzzamount := 100
|
||||
for i := 0; i < fuzzamount; i++ {
|
||||
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
|
||||
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
|
||||
}
|
||||
// shouldn't panic
|
||||
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventMarshalTo(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedEvent(popr, false)
|
||||
size := p.Size()
|
||||
dAtA := make([]byte, size)
|
||||
for i := range dAtA {
|
||||
dAtA[i] = byte(popr.Intn(256))
|
||||
}
|
||||
_, err := p.MarshalTo(dAtA)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
msg := &Event{}
|
||||
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
for i := range dAtA {
|
||||
dAtA[i] = byte(popr.Intn(256))
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaderProto(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
@@ -2675,16 +2734,16 @@ func TestConsensusParamsJSON(t *testing.T) {
|
||||
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
func TestBlockSizeParamsJSON(t *testing.T) {
|
||||
func TestBlockParamsJSON(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedBlockSizeParams(popr, true)
|
||||
p := NewPopulatedBlockParams(popr, true)
|
||||
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
|
||||
jsondata, err := marshaler.MarshalToString(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
msg := &BlockSizeParams{}
|
||||
msg := &BlockParams{}
|
||||
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
@@ -2747,6 +2806,24 @@ func TestLastCommitInfoJSON(t *testing.T) {
|
||||
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
func TestEventJSON(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedEvent(popr, true)
|
||||
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
|
||||
jsondata, err := marshaler.MarshalToString(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
msg := &Event{}
|
||||
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
func TestHeaderJSON(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
@@ -3637,12 +3714,12 @@ func TestConsensusParamsProtoCompactText(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockSizeParamsProtoText(t *testing.T) {
|
||||
func TestBlockParamsProtoText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedBlockSizeParams(popr, true)
|
||||
p := NewPopulatedBlockParams(popr, true)
|
||||
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
|
||||
msg := &BlockSizeParams{}
|
||||
msg := &BlockParams{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
@@ -3651,12 +3728,12 @@ func TestBlockSizeParamsProtoText(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockSizeParamsProtoCompactText(t *testing.T) {
|
||||
func TestBlockParamsProtoCompactText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedBlockSizeParams(popr, true)
|
||||
p := NewPopulatedBlockParams(popr, true)
|
||||
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
|
||||
msg := &BlockSizeParams{}
|
||||
msg := &BlockParams{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
@@ -3749,6 +3826,34 @@ func TestLastCommitInfoProtoCompactText(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventProtoText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedEvent(popr, true)
|
||||
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
|
||||
msg := &Event{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventProtoCompactText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedEvent(popr, true)
|
||||
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
|
||||
msg := &Event{}
|
||||
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
if !p.Equal(msg) {
|
||||
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaderProtoText(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
@@ -4573,10 +4678,10 @@ func TestConsensusParamsSize(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockSizeParamsSize(t *testing.T) {
|
||||
func TestBlockParamsSize(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedBlockSizeParams(popr, true)
|
||||
p := NewPopulatedBlockParams(popr, true)
|
||||
size2 := github_com_gogo_protobuf_proto.Size(p)
|
||||
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
@@ -4661,6 +4766,28 @@ func TestLastCommitInfoSize(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestEventSize(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
p := NewPopulatedEvent(popr, true)
|
||||
size2 := github_com_gogo_protobuf_proto.Size(p)
|
||||
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
|
||||
if err != nil {
|
||||
t.Fatalf("seed = %d, err = %v", seed, err)
|
||||
}
|
||||
size := p.Size()
|
||||
if len(dAtA) != size {
|
||||
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
|
||||
}
|
||||
if size2 != size {
|
||||
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
|
||||
}
|
||||
size3 := github_com_gogo_protobuf_proto.Size(p)
|
||||
if size3 != size {
|
||||
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
|
||||
}
|
||||
}
|
||||
|
||||
func TestHeaderSize(t *testing.T) {
|
||||
seed := time.Now().UnixNano()
|
||||
popr := math_rand.New(math_rand.NewSource(seed))
|
||||
|
||||
@@ -7,7 +7,6 @@ clone_folder: c:\go\path\src\github.com\tendermint\tendermint
before_build:
- cmd: set GOPATH=%GOROOT%\path
- cmd: set PATH=%GOPATH%\bin;%PATH%
- cmd: make get_vendor_deps
build_script:
- cmd: make test
test: off

49 behaviour/peer_behaviour.go Normal file
@@ -0,0 +1,49 @@
package behaviour

import (
    "github.com/tendermint/tendermint/p2p"
)

// PeerBehaviour is a struct describing a behaviour a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behaviour performed by the peer.
type PeerBehaviour struct {
    peerID p2p.ID
    reason interface{}
}

type badMessage struct {
    explanation string
}

// BadMessage returns a badMessage PeerBehaviour.
func BadMessage(peerID p2p.ID, explanation string) PeerBehaviour {
    return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
}

type messageOutOfOrder struct {
    explanation string
}

// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
func MessageOutOfOrder(peerID p2p.ID, explanation string) PeerBehaviour {
    return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
}

type consensusVote struct {
    explanation string
}

// ConsensusVote returns a consensusVote PeerBehaviour.
func ConsensusVote(peerID p2p.ID, explanation string) PeerBehaviour {
    return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
}

type blockPart struct {
    explanation string
}

// BlockPart returns a blockPart PeerBehaviour.
func BlockPart(peerID p2p.ID, explanation string) PeerBehaviour {
    return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
}
86 behaviour/reporter.go Normal file
@@ -0,0 +1,86 @@
package behaviour

import (
    "errors"
    "sync"

    "github.com/tendermint/tendermint/p2p"
)

// Reporter provides an interface for reactors to report the behaviour
// of peers synchronously to other components.
type Reporter interface {
    Report(behaviour PeerBehaviour) error
}

// SwitchReporter reports peer behaviour to an internal Switch.
type SwitchReporter struct {
    sw *p2p.Switch
}

// NewSwitcReporter returns a new SwitchReporter instance which wraps the Switch.
func NewSwitcReporter(sw *p2p.Switch) *SwitchReporter {
    return &SwitchReporter{
        sw: sw,
    }
}

// Report reports the behaviour of a peer to the Switch.
func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
    peer := spbr.sw.Peers().Get(behaviour.peerID)
    if peer == nil {
        return errors.New("peer not found")
    }

    switch reason := behaviour.reason.(type) {
    case consensusVote, blockPart:
        spbr.sw.MarkPeerAsGood(peer)
    case badMessage:
        spbr.sw.StopPeerForError(peer, reason.explanation)
    case messageOutOfOrder:
        spbr.sw.StopPeerForError(peer, reason.explanation)
    default:
        return errors.New("unknown reason reported")
    }

    return nil
}

// MockReporter is a concrete implementation of the Reporter
// interface used in reactor tests to ensure reactors report the correct
// behaviour in manufactured scenarios.
type MockReporter struct {
    mtx sync.RWMutex
    pb  map[p2p.ID][]PeerBehaviour
}

// NewMockReporter returns a Reporter which records all reported
// behaviours in memory.
func NewMockReporter() *MockReporter {
    return &MockReporter{
        pb: map[p2p.ID][]PeerBehaviour{},
    }
}

// Report stores the PeerBehaviour produced by the peer identified by peerID.
func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
    mpbr.mtx.Lock()
    defer mpbr.mtx.Unlock()
    mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour)

    return nil
}

// GetBehaviours returns all behaviours reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour {
    mpbr.mtx.RLock()
    defer mpbr.mtx.RUnlock()
    if items, ok := mpbr.pb[peerID]; ok {
        result := make([]PeerBehaviour, len(items))
        copy(result, items)

        return result
    }

    return []PeerBehaviour{}
}
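To make the intended call pattern concrete, here is a small usage sketch built only from the names defined above; reportBadPeer, its arguments, and the package name are hypothetical, not part of the diff:

package example

import (
    "log"

    bh "github.com/tendermint/tendermint/behaviour"
    "github.com/tendermint/tendermint/p2p"
)

// reportBadPeer shows the intended call pattern for the Reporter interface.
func reportBadPeer(sw *p2p.Switch, peerID p2p.ID) {
    reporter := bh.NewSwitcReporter(sw) // constructor name as spelled in the diff
    if err := reporter.Report(bh.BadMessage(peerID, "malformed message")); err != nil {
        log.Println("report failed:", err) // e.g. the peer was not found in the switch
    }
}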
201 behaviour/reporter_test.go Normal file
@@ -0,0 +1,201 @@
package behaviour_test

import (
    "sync"
    "testing"

    bh "github.com/tendermint/tendermint/behaviour"
    "github.com/tendermint/tendermint/p2p"
)

// TestMockReporter tests the MockReporter's ability to store reported
// peer behaviour in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
    var peerID p2p.ID = "MockPeer"
    pr := bh.NewMockReporter()

    behaviours := pr.GetBehaviours(peerID)
    if len(behaviours) != 0 {
        t.Error("Expected to have no behaviours reported")
    }

    badMessage := bh.BadMessage(peerID, "bad message")
    pr.Report(badMessage)
    behaviours = pr.GetBehaviours(peerID)
    if len(behaviours) != 1 {
        t.Error("Expected the peer to have one reported behaviour")
    }

    if behaviours[0] != badMessage {
        t.Error("Expected Bad Message to have been reported")
    }
}

type scriptItem struct {
    peerID    p2p.ID
    behaviour bh.PeerBehaviour
}

// equalBehaviours returns true if a and b contain the same PeerBehaviours with
// the same frequencies and otherwise false.
func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
    aHistogram := map[bh.PeerBehaviour]int{}
    bHistogram := map[bh.PeerBehaviour]int{}

    for _, behaviour := range a {
        aHistogram[behaviour]++
    }

    for _, behaviour := range b {
        bHistogram[behaviour]++
    }

    if len(aHistogram) != len(bHistogram) {
        return false
    }

    for _, behaviour := range a {
        if aHistogram[behaviour] != bHistogram[behaviour] {
            return false
        }
    }

    for _, behaviour := range b {
        if bHistogram[behaviour] != aHistogram[behaviour] {
            return false
        }
    }

    return true
}

// TestEqualPeerBehaviours tests that equalBehaviours compares two slices of
// peer behaviours by the behaviours they contain and the frequencies with
// which those behaviours occur.
func TestEqualPeerBehaviours(t *testing.T) {
    var (
        peerID        p2p.ID = "MockPeer"
        consensusVote        = bh.ConsensusVote(peerID, "voted")
        blockPart            = bh.BlockPart(peerID, "blocked")
        equals               = []struct {
            left  []bh.PeerBehaviour
            right []bh.PeerBehaviour
        }{
            // Empty sets
            {[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
            // Single behaviours
            {[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
            // Equal Frequencies
            {[]bh.PeerBehaviour{consensusVote, consensusVote},
                []bh.PeerBehaviour{consensusVote, consensusVote}},
            // Equal frequencies different orders
            {[]bh.PeerBehaviour{consensusVote, blockPart},
                []bh.PeerBehaviour{blockPart, consensusVote}},
        }
        unequals = []struct {
            left  []bh.PeerBehaviour
            right []bh.PeerBehaviour
        }{
            // Comparing empty sets to non empty sets
            {[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
            // Different behaviours
            {[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
            // Same behaviour with different frequencies
            {[]bh.PeerBehaviour{consensusVote},
                []bh.PeerBehaviour{consensusVote, consensusVote}},
        }
    )

    for _, test := range equals {
        if !equalBehaviours(test.left, test.right) {
            t.Errorf("expected %#v and %#v to be equal", test.left, test.right)
        }
    }

    for _, test := range unequals {
        if equalBehaviours(test.left, test.right) {
            t.Errorf("expected %#v and %#v to be unequal", test.left, test.right)
        }
    }
}

// TestMockPeerBehaviourReporterConcurrency constructs a scenario in which
// multiple goroutines are using the same MockReporter instance.
// This test reproduces the conditions in which MockReporter will
// be used within Reactor `Receive` method tests to ensure thread safety.
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
    var (
        behaviourScript = []struct {
            peerID     p2p.ID
            behaviours []bh.PeerBehaviour
        }{
            {"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
            {"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
            {
                "3",
                []bh.PeerBehaviour{bh.BlockPart("3", ""),
                    bh.ConsensusVote("3", ""),
                    bh.BlockPart("3", ""),
                    bh.ConsensusVote("3", "")}},
            {
                "4",
                []bh.PeerBehaviour{bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", ""),
                    bh.ConsensusVote("4", "")}},
            {
                "5",
                []bh.PeerBehaviour{bh.BlockPart("5", ""),
                    bh.ConsensusVote("5", ""),
                    bh.BlockPart("5", ""),
                    bh.ConsensusVote("5", "")}},
        }
    )

    var receiveWg sync.WaitGroup
    pr := bh.NewMockReporter()
    scriptItems := make(chan scriptItem)
    done := make(chan int)
    numConsumers := 3
    for i := 0; i < numConsumers; i++ {
        receiveWg.Add(1)
        go func() {
            defer receiveWg.Done()
            for {
                select {
                case pb := <-scriptItems:
                    pr.Report(pb.behaviour)
                case <-done:
                    return
                }
            }
        }()
    }

    var sendingWg sync.WaitGroup
    sendingWg.Add(1)
    go func() {
        defer sendingWg.Done()
        for _, item := range behaviourScript {
            for _, reason := range item.behaviours {
                scriptItems <- scriptItem{item.peerID, reason}
            }
        }
    }()

    sendingWg.Wait()

    for i := 0; i < numConsumers; i++ {
        done <- 1
    }

    receiveWg.Wait()

    for _, items := range behaviourScript {
        reported := pr.GetBehaviours(items.peerID)
        if !equalBehaviours(reported, items.behaviours) {
            t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
                items.peerID, items.behaviours, reported)
        }
    }
}
@@ -1,29 +0,0 @@
package benchmarks

import (
    "sync/atomic"
    "testing"
    "unsafe"
)

func BenchmarkAtomicUintPtr(b *testing.B) {
    b.StopTimer()
    pointers := make([]uintptr, 1000)
    b.Log(unsafe.Sizeof(pointers[0]))
    b.StartTimer()

    for j := 0; j < b.N; j++ {
        atomic.StoreUintptr(&pointers[j%1000], uintptr(j))
    }
}

func BenchmarkAtomicPointer(b *testing.B) {
    b.StopTimer()
    pointers := make([]unsafe.Pointer, 1000)
    b.Log(unsafe.Sizeof(pointers[0]))
    b.StartTimer()

    for j := 0; j < b.N; j++ {
        atomic.StorePointer(&pointers[j%1000], unsafe.Pointer(uintptr(j)))
    }
}

2 benchmarks/blockchain/.gitignore vendored
@@ -1,2 +0,0 @@
data

@@ -1,80 +0,0 @@
#!/bin/bash

DATA=$GOPATH/src/github.com/tendermint/tendermint/benchmarks/blockchain/data
if [ ! -d $DATA ]; then
    echo "no data found, generating a chain... (this only has to happen once)"

    tendermint init --home $DATA
    cp $DATA/config.toml $DATA/config2.toml
    echo "
[consensus]
timeout_commit = 0
" >> $DATA/config.toml

    echo "starting node"
    tendermint node \
        --home $DATA \
        --proxy_app kvstore \
        --p2p.laddr tcp://127.0.0.1:56656 \
        --rpc.laddr tcp://127.0.0.1:56657 \
        --log_level error &

    echo "making blocks for 60s"
    sleep 60

    mv $DATA/config2.toml $DATA/config.toml

    kill %1

    echo "done generating chain."
fi

# validator node
HOME1=$TMPDIR$RANDOM$RANDOM
cp -R $DATA $HOME1
echo "starting validator node"
tendermint node \
    --home $HOME1 \
    --proxy_app kvstore \
    --p2p.laddr tcp://127.0.0.1:56656 \
    --rpc.laddr tcp://127.0.0.1:56657 \
    --log_level error &
sleep 1

# downloader node
HOME2=$TMPDIR$RANDOM$RANDOM
tendermint init --home $HOME2
cp $HOME1/genesis.json $HOME2
printf "starting downloader node"
tendermint node \
    --home $HOME2 \
    --proxy_app kvstore \
    --p2p.laddr tcp://127.0.0.1:56666 \
    --rpc.laddr tcp://127.0.0.1:56667 \
    --p2p.persistent_peers 127.0.0.1:56656 \
    --log_level error &

# wait for node to start up so we only count time where we are actually syncing
sleep 0.5
while curl localhost:56667/status 2> /dev/null | grep "\"latest_block_height\": 0," > /dev/null
do
    printf '.'
    sleep 0.2
done
echo

echo "syncing blockchain for 10s"
for i in {1..10}
do
    sleep 1
    HEIGHT="$(curl localhost:56667/status 2> /dev/null \
        | grep 'latest_block_height' \
        | grep -o ' [0-9]*' \
        | xargs)"
    let 'RATE = HEIGHT / i'
    echo "height: $HEIGHT, blocks/sec: $RATE"
done

kill %1
kill %2
rm -rf $HOME1 $HOME2
@@ -1,19 +0,0 @@
package benchmarks

import (
    "testing"
)

func BenchmarkChanMakeClose(b *testing.B) {
    b.StopTimer()
    b.StartTimer()

    for j := 0; j < b.N; j++ {
        foo := make(chan struct{})
        close(foo)
        something, ok := <-foo
        if ok {
            b.Error(something, ok)
        }
    }
}
@@ -1,123 +0,0 @@
package benchmarks

import (
    "testing"
    "time"

    "github.com/tendermint/go-amino"

    proto "github.com/tendermint/tendermint/benchmarks/proto"
    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/p2p"
    ctypes "github.com/tendermint/tendermint/rpc/core/types"
)

func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo {
    return p2p.DefaultNodeInfo{
        ProtocolVersion: p2p.ProtocolVersion{1, 2, 3},
        ID_:             id,
        Moniker:         "SOMENAME",
        Network:         "SOMENAME",
        ListenAddr:      "SOMEADDR",
        Version:         "SOMEVER",
        Other: p2p.DefaultNodeInfoOther{
            TxIndex:    "on",
            RPCAddress: "0.0.0.0:26657",
        },
    }
}

func BenchmarkEncodeStatusWire(b *testing.B) {
    b.StopTimer()
    cdc := amino.NewCodec()
    ctypes.RegisterAmino(cdc)
    nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
    status := &ctypes.ResultStatus{
        NodeInfo: testNodeInfo(nodeKey.ID()),
        SyncInfo: ctypes.SyncInfo{
            LatestBlockHash:   []byte("SOMEBYTES"),
            LatestBlockHeight: 123,
            LatestBlockTime:   time.Unix(0, 1234),
        },
        ValidatorInfo: ctypes.ValidatorInfo{
            PubKey: nodeKey.PubKey(),
        },
    }
    b.StartTimer()

    counter := 0
    for i := 0; i < b.N; i++ {
        jsonBytes, err := cdc.MarshalJSON(status)
        if err != nil {
            panic(err)
        }
        counter += len(jsonBytes)
    }

}

func BenchmarkEncodeNodeInfoWire(b *testing.B) {
    b.StopTimer()
    cdc := amino.NewCodec()
    ctypes.RegisterAmino(cdc)
    nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
    nodeInfo := testNodeInfo(nodeKey.ID())
    b.StartTimer()

    counter := 0
    for i := 0; i < b.N; i++ {
        jsonBytes, err := cdc.MarshalJSON(nodeInfo)
        if err != nil {
            panic(err)
        }
        counter += len(jsonBytes)
    }
}

func BenchmarkEncodeNodeInfoBinary(b *testing.B) {
    b.StopTimer()
    cdc := amino.NewCodec()
    ctypes.RegisterAmino(cdc)
    nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
    nodeInfo := testNodeInfo(nodeKey.ID())
    b.StartTimer()

    counter := 0
    for i := 0; i < b.N; i++ {
        jsonBytes := cdc.MustMarshalBinaryBare(nodeInfo)
        counter += len(jsonBytes)
    }

}

func BenchmarkEncodeNodeInfoProto(b *testing.B) {
    b.StopTimer()
    nodeKey := p2p.NodeKey{PrivKey: ed25519.GenPrivKey()}
    nodeID := string(nodeKey.ID())
    someName := "SOMENAME"
    someAddr := "SOMEADDR"
    someVer := "SOMEVER"
    someString := "SOMESTRING"
    otherString := "OTHERSTRING"
    nodeInfo := proto.NodeInfo{
        Id:         &proto.ID{Id: &nodeID},
        Moniker:    &someName,
        Network:    &someName,
        ListenAddr: &someAddr,
        Version:    &someVer,
        Other:      []string{someString, otherString},
    }
    b.StartTimer()

    counter := 0
    for i := 0; i < b.N; i++ {
        bytes, err := nodeInfo.Marshal()
        if err != nil {
            b.Fatal(err)
            return
        }
        //jsonBytes := wire.JSONBytes(nodeInfo)
        counter += len(bytes)
    }

}
@@ -1 +0,0 @@
package benchmarks
@@ -1,35 +0,0 @@
package benchmarks

import (
    "testing"

    cmn "github.com/tendermint/tendermint/libs/common"
)

func BenchmarkSomething(b *testing.B) {
    b.StopTimer()
    numItems := 100000
    numChecks := 100000
    keys := make([]string, numItems)
    for i := 0; i < numItems; i++ {
        keys[i] = cmn.RandStr(100)
    }
    txs := make([]string, numChecks)
    for i := 0; i < numChecks; i++ {
        txs[i] = cmn.RandStr(100)
    }
    b.StartTimer()

    counter := 0
    for j := 0; j < b.N; j++ {
        foo := make(map[string]string)
        for _, key := range keys {
            foo[key] = key
        }
        for _, tx := range txs {
            if _, ok := foo[tx]; ok {
                counter++
            }
        }
    }
}
@@ -1,33 +0,0 @@
package benchmarks

import (
    "os"
    "testing"

    cmn "github.com/tendermint/tendermint/libs/common"
)

func BenchmarkFileWrite(b *testing.B) {
    b.StopTimer()
    file, err := os.OpenFile("benchmark_file_write.out",
        os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600)
    if err != nil {
        b.Error(err)
    }
    testString := cmn.RandStr(200) + "\n"
    b.StartTimer()

    for i := 0; i < b.N; i++ {
        _, err := file.Write([]byte(testString))
        if err != nil {
            b.Error(err)
        }
    }

    if err := file.Close(); err != nil {
        b.Error(err)
    }
    if err := os.Remove("benchmark_file_write.out"); err != nil {
        b.Error(err)
    }
}
@@ -1,2 +0,0 @@
Doing some protobuf tests here.
Using gogoprotobuf.
(File diff suppressed because it is too large.)
@@ -1,29 +0,0 @@
message ResultStatus {
  optional NodeInfo nodeInfo = 1;
  required PubKey pubKey = 2;
  required bytes latestBlockHash = 3;
  required int64 latestBlockHeight = 4;
  required int64 latestBlocktime = 5;
}

message NodeInfo {
  required ID id = 1;
  required string moniker = 2;
  required string network = 3;
  required string remoteAddr = 4;
  required string listenAddr = 5;
  required string version = 6;
  repeated string other = 7;
}

message ID {
  required string id = 1;
}

message PubKey {
  optional PubKeyEd25519 ed25519 = 1;
}

message PubKeyEd25519 {
  required bytes bytes = 1;
}
@@ -1,47 +0,0 @@
package main

import (
    "context"
    "encoding/binary"
    "fmt"
    "time"

    cmn "github.com/tendermint/tendermint/libs/common"
    rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)

func main() {
    wsc := rpcclient.NewWSClient("127.0.0.1:26657", "/websocket")
    err := wsc.Start()
    if err != nil {
        cmn.Exit(err.Error())
    }
    defer wsc.Stop()

    // Read a bunch of responses
    go func() {
        for {
            _, ok := <-wsc.ResponsesCh
            if !ok {
                break
            }
            //fmt.Println("Received response", string(wire.JSONBytes(res)))
        }
    }()

    // Make a bunch of requests
    buf := make([]byte, 32)
    for i := 0; ; i++ {
        binary.BigEndian.PutUint64(buf, uint64(i))
        //txBytes := hex.EncodeToString(buf[:n])
        fmt.Print(".")
        err = wsc.Call(context.TODO(), "broadcast_tx", map[string]interface{}{"tx": buf[:8]})
        if err != nil {
            cmn.Exit(err.Error())
        }
        if i%1000 == 0 {
            fmt.Println(i)
        }
        time.Sleep(time.Microsecond * 1000)
    }
}
@@ -1,7 +1,7 @@
package blockchain
package v0

import (
    "github.com/tendermint/go-amino"
    amino "github.com/tendermint/go-amino"
    "github.com/tendermint/tendermint/types"
)

@@ -1,4 +1,4 @@
package blockchain
package v0

import (
    "errors"
@@ -8,9 +8,9 @@ import (
    "sync/atomic"
    "time"

    cmn "github.com/tendermint/tendermint/libs/common"
    flow "github.com/tendermint/tendermint/libs/flowrate"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"

    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
@@ -59,8 +59,9 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
    are not at peer limits, we can probably switch to consensus reactor
*/

// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
    cmn.BaseService
    service.BaseService
    startTime time.Time

    mtx sync.Mutex
@@ -69,7 +70,7 @@ type BlockPool struct {
    height int64 // the lowest key in requesters.
    // peers
    peers         map[p2p.ID]*bpPeer
    maxPeerHeight int64
    maxPeerHeight int64 // the biggest reported height

    // atomic
    numPending int32 // number of requests pending assignment or block response
@@ -78,6 +79,8 @@ type BlockPool struct {
    errorsCh chan<- peerError
}

// NewBlockPool returns a new BlockPool with the height equal to start. Block
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
    bp := &BlockPool{
        peers: make(map[p2p.ID]*bpPeer),
@@ -89,19 +92,19 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
        requestsCh: requestsCh,
        errorsCh:   errorsCh,
    }
    bp.BaseService = *cmn.NewBaseService(nil, "BlockPool", bp)
    bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
    return bp
}

// OnStart implements service.Service by spawning requesters routine and recording
// pool's start time.
func (pool *BlockPool) OnStart() error {
    go pool.makeRequestersRoutine()
    pool.startTime = time.Now()
    return nil
}

func (pool *BlockPool) OnStop() {}

// Run spawns requesters as needed.
// spawns requesters as needed
func (pool *BlockPool) makeRequestersRoutine() {
    for {
        if !pool.IsRunning() {
@@ -109,17 +112,18 @@ func (pool *BlockPool) makeRequestersRoutine() {
        }

        _, numPending, lenRequesters := pool.GetStatus()
        if numPending >= maxPendingRequests {
        switch {
        case numPending >= maxPendingRequests:
            // sleep for a bit.
            time.Sleep(requestIntervalMS * time.Millisecond)
            // check for timed out peers
            pool.removeTimedoutPeers()
        } else if lenRequesters >= maxTotalRequesters {
        case lenRequesters >= maxTotalRequesters:
            // sleep for a bit.
            time.Sleep(requestIntervalMS * time.Millisecond)
            // check for timed out peers
            pool.removeTimedoutPeers()
        } else {
        default:
            // request for more blocks.
            pool.makeNextRequester()
        }
@@ -150,6 +154,8 @@ func (pool *BlockPool) removeTimedoutPeers() {
    }
}

// GetStatus returns pool's height, numPending requests and the number of
// requesters.
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
@@ -157,6 +163,7 @@ func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequester
    return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
}

// IsCaughtUp returns true if this node is caught up, false otherwise.
// TODO: relax conditions, prevent abuse.
func (pool *BlockPool) IsCaughtUp() bool {
    pool.mtx.Lock()
@@ -170,14 +177,16 @@ func (pool *BlockPool) IsCaughtUp() bool {

    // Some conditions to determine if we're caught up.
    // Ensures we've either received a block or waited some amount of time,
    // and that we're synced to the highest known height. Note we use maxPeerHeight - 1
    // because to sync block H requires block H+1 to verify the LastCommit.
    // and that we're synced to the highest known height.
    // Note we use maxPeerHeight - 1 because to sync block H requires block H+1
    // to verify the LastCommit.
    receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second
    ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1)
    isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
    return isCaughtUp
}

// PeekTwoBlocks returns blocks at pool.height and pool.height+1.
// We need to see the second block's Commit to validate the first block.
// So we peek two blocks at a time.
// The caller will verify the commit.
@@ -194,7 +203,7 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block)
    return
}

// Pop the first block at pool.height
// PopRequest pops the first block at pool.height.
// It must have been validated by 'second'.Commit from PeekTwoBlocks().
func (pool *BlockPool) PopRequest() {
    pool.mtx.Lock()
@@ -214,7 +223,7 @@ func (pool *BlockPool) PopRequest() {
    }
}
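The comments above describe a peek-two/verify/pop protocol; the following sketch (not from the diff, assumed to live in the same package as BlockPool) shows the consumer-loop shape, with verifyWithCommit standing in for the caller's real commit verification:

// consumeNext is a hypothetical caller of the pool API shown above.
func consumeNext(pool *BlockPool, verifyWithCommit func(first, second *types.Block) error) {
    first, second := pool.PeekTwoBlocks()
    if first == nil || second == nil {
        return // not enough buffered blocks yet
    }
    if err := verifyWithCommit(first, second); err != nil {
        // The peer that sent `first` is suspect: re-request that height elsewhere.
        pool.RedoRequest(first.Height)
        return
    }
    pool.PopRequest() // `first` is verified; pool.height advances
}

For example, to sync block 10 the pool must already hold block 11, whose LastCommit proves that block 10 was committed.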

// Invalidates the block at pool.height,
// RedoRequest invalidates the block at pool.height,
// Remove the peer and redo request from others.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
@@ -230,6 +239,7 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
    return peerID
}

// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
    pool.mtx.Lock()
@@ -237,7 +247,14 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int

    requester := pool.requesters[block.Height]
    if requester == nil {
        pool.Logger.Info("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
        pool.Logger.Info(
            "peer sent us a block we didn't expect",
            "peer",
            peerID,
            "curHeight",
            pool.height,
            "blockHeight",
            block.Height)
        diff := pool.height - block.Height
        if diff < 0 {
            diff *= -1
@@ -260,14 +277,14 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
    }
}

// MaxPeerHeight returns the highest height reported by a peer.
// MaxPeerHeight returns the highest reported height.
func (pool *BlockPool) MaxPeerHeight() int64 {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
    return pool.maxPeerHeight
}

// Sets the peer's alleged blockchain height.
// SetPeerHeight sets the peer's alleged blockchain height.
func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
@@ -286,6 +303,8 @@ func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
    }
}

// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
@@ -299,7 +318,32 @@ func (pool *BlockPool) removePeer(peerID p2p.ID) {
            requester.redo(peerID)
        }
    }
    delete(pool.peers, peerID)

    peer, ok := pool.peers[peerID]
    if ok {
        if peer.timeout != nil {
            peer.timeout.Stop()
        }

        delete(pool.peers, peerID)

        // Find a new peer with the biggest height and update maxPeerHeight if the
        // peer's height was the biggest.
        if peer.height == pool.maxPeerHeight {
            pool.updateMaxPeerHeight()
        }
    }
}

// If no peers are left, maxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
    var max int64
    for _, peer := range pool.peers {
        if peer.height > max {
            max = peer.height
        }
    }
    pool.maxPeerHeight = max
}

// Pick an available peer with at least the given minHeight.
@@ -363,7 +407,8 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
    pool.errorsCh <- peerError{err, peerID}
}

// unused by tendermint; left for debugging purposes
// for debugging purposes
//nolint:unused
func (pool *BlockPool) debug() string {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
@@ -384,14 +429,14 @@ func (pool *BlockPool) debug() string {
//-------------------------------------

type bpPeer struct {
    didTimeout  bool
    numPending  int32
    height      int64
    pool        *BlockPool
    id          p2p.ID
    recvMonitor *flow.Monitor

    height     int64
    numPending int32
    timeout    *time.Timer
    didTimeout bool
    timeout *time.Timer

    logger log.Logger
}
@@ -456,7 +501,7 @@ func (peer *bpPeer) onTimeout() {
//-------------------------------------

type bpRequester struct {
    cmn.BaseService
    service.BaseService
    pool       *BlockPool
    height     int64
    gotBlockCh chan struct{}
@@ -477,7 +522,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
        peerID: "",
        block:  nil,
    }
    bpr.BaseService = *cmn.NewBaseService(nil, "bpRequester", bpr)
    bpr.BaseService = *service.NewBaseService(nil, "bpRequester", bpr)
    return bpr
}

@@ -531,9 +576,9 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerId p2p.ID) {
func (bpr *bpRequester) redo(peerID p2p.ID) {
    select {
    case bpr.redoCh <- peerId:
    case bpr.redoCh <- peerID:
    default:
    }
}
@@ -588,8 +633,8 @@ OUTER_LOOP:
    }
}

//-------------------------------------

// BlockRequest stores a block request identified by the block Height and the PeerID responsible for
// delivering the block
type BlockRequest struct {
    Height int64
    PeerID p2p.ID
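As the NewBlockPool comment earlier notes, the pool communicates over requestsCh and errorsCh; a minimal wiring sketch (hypothetical, assumed to sit in the same package, mirroring what the reactor's poolRoutine does later in this diff):

func runPoolSketch(startHeight int64) {
    requestsCh := make(chan BlockRequest, 100) // capacity is an arbitrary choice here
    errorsCh := make(chan peerError, 100)
    pool := NewBlockPool(startHeight, requestsCh, errorsCh)
    if err := pool.Start(); err != nil {
        panic(err)
    }
    for {
        select {
        case req := <-requestsCh:
            _ = req // forward req.Height to the peer identified by req.PeerID
        case perr := <-errorsCh:
            _ = perr // stop or penalize the offending peer
        }
    }
}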
@@ -1,12 +1,15 @@
package blockchain
package v0

import (
    "fmt"
    "testing"
    "time"

    cmn "github.com/tendermint/tendermint/libs/common"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/libs/log"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)
@@ -39,7 +42,10 @@ func (p testPeer) runInputRoutine() {
func (p testPeer) simulateInput(input inputData) {
    block := &types.Block{Header: types.Header{Height: input.request.Height}}
    input.pool.AddBlock(input.request.PeerID, block, 123)
    input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
    // TODO: uncommenting this creates a race which is detected by:
    // https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
    // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890
    // input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}

type testPeers map[p2p.ID]testPeer
@@ -59,14 +65,14 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
    peers := make(testPeers, numPeers)
    for i := 0; i < numPeers; i++ {
        peerID := p2p.ID(cmn.RandStr(12))
        height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
        peerID := p2p.ID(tmrand.Str(12))
        height := minHeight + tmrand.Int63n(maxHeight-minHeight)
        peers[peerID] = testPeer{peerID, height, make(chan inputData, 10)}
    }
    return peers
}

func TestBasic(t *testing.T) {
func TestBlockPoolBasic(t *testing.T) {
    start := int64(42)
    peers := makePeers(10, start+1, 1000)
    errorsCh := make(chan peerError, 1000)
@@ -122,7 +128,7 @@ func TestBasic(t *testing.T) {
    }
}

func TestTimeout(t *testing.T) {
func TestBlockPoolTimeout(t *testing.T) {
    start := int64(42)
    peers := makePeers(10, start+1, 1000)
    errorsCh := make(chan peerError, 1000)
@@ -180,3 +186,40 @@ func TestTimeout(t *testing.T) {
        }
    }
}

func TestBlockPoolRemovePeer(t *testing.T) {
    peers := make(testPeers, 10)
    for i := 0; i < 10; i++ {
        peerID := p2p.ID(fmt.Sprintf("%d", i+1))
        height := int64(i + 1)
        peers[peerID] = testPeer{peerID, height, make(chan inputData)}
    }
    requestsCh := make(chan BlockRequest)
    errorsCh := make(chan peerError)

    pool := NewBlockPool(1, requestsCh, errorsCh)
    pool.SetLogger(log.TestingLogger())
    err := pool.Start()
    require.NoError(t, err)
    defer pool.Stop()

    // add peers
    for peerID, peer := range peers {
        pool.SetPeerHeight(peerID, peer.height)
    }
    assert.EqualValues(t, 10, pool.MaxPeerHeight())

    // remove not-existing peer
    assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) })

    // remove peer with biggest height
    pool.RemovePeer(p2p.ID("10"))
    assert.EqualValues(t, 9, pool.MaxPeerHeight())

    // remove all peers
    for peerID := range peers {
        pool.RemovePeer(peerID)
    }

    assert.EqualValues(t, 0, pool.MaxPeerHeight())
}
@@ -1,4 +1,4 @@
|
||||
package blockchain
|
||||
package v0
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@@ -8,10 +8,10 @@ import (
|
||||
|
||||
amino "github.com/tendermint/go-amino"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
@@ -61,7 +61,7 @@ type BlockchainReactor struct {
|
||||
initialState sm.State
|
||||
|
||||
blockExec *sm.BlockExecutor
|
||||
store *BlockStore
|
||||
store *store.BlockStore
|
||||
pool *BlockPool
|
||||
fastSync bool
|
||||
|
||||
@@ -70,7 +70,7 @@ type BlockchainReactor struct {
|
||||
}
|
||||
|
||||
// NewBlockchainReactor returns new reactor instance.
|
||||
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore,
|
||||
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
|
||||
fastSync bool) *BlockchainReactor {
|
||||
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
@@ -102,13 +102,13 @@ func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *Bl
|
||||
return bcR
|
||||
}
|
||||
|
||||
// SetLogger implements cmn.Service by setting the logger on reactor and pool.
|
||||
// SetLogger implements service.Service by setting the logger on reactor and pool.
|
||||
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
|
||||
bcR.BaseService.Logger = l
|
||||
bcR.pool.Logger = l
|
||||
}
|
||||
|
||||
// OnStart implements cmn.Service.
|
||||
// OnStart implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStart() error {
|
||||
if bcR.fastSync {
|
||||
err := bcR.pool.Start()
|
||||
@@ -120,7 +120,7 @@ func (bcR *BlockchainReactor) OnStart() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements cmn.Service.
|
||||
// OnStop implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStop() {
|
||||
bcR.pool.Stop()
|
||||
}
|
||||
@@ -141,9 +141,9 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
|
||||
if !peer.Send(BlockchainChannel, msgBytes) {
|
||||
// doing nothing, will try later in `poolRoutine`
|
||||
}
|
||||
peer.Send(BlockchainChannel, msgBytes)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
// bcStatusResponseMessage from the peer and call pool.SetPeerHeight
|
||||
}
|
||||
@@ -191,18 +191,13 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *bcBlockRequestMessage:
|
||||
if queued := bcR.respondToPeer(msg, src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
}
|
||||
bcR.respondToPeer(msg, src)
|
||||
case *bcBlockResponseMessage:
|
||||
bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
|
||||
case *bcStatusRequestMessage:
|
||||
// Send peer our state.
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
|
||||
queued := src.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
// sorry
|
||||
}
|
||||
src.TrySend(BlockchainChannel, msgBytes)
|
||||
case *bcStatusResponseMessage:
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerHeight(src.ID(), msg.Height)
|
||||
@@ -229,32 +224,40 @@ func (bcR *BlockchainReactor) poolRoutine() {
|
||||
|
||||
didProcessCh := make(chan struct{}, 1)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-bcR.Quit():
|
||||
return
|
||||
case <-bcR.pool.Quit():
|
||||
return
|
||||
case request := <-bcR.requestsCh:
|
||||
peer := bcR.Switch.Peers().Get(request.PeerID)
|
||||
if peer == nil {
|
||||
continue
|
||||
}
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
|
||||
}
|
||||
case err := <-bcR.errorsCh:
|
||||
peer := bcR.Switch.Peers().Get(err.peerID)
|
||||
if peer != nil {
|
||||
bcR.Switch.StopPeerForError(peer, err)
|
||||
}
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest() // nolint: errcheck
|
||||
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
FOR_LOOP:
|
||||
for {
|
||||
select {
|
||||
case request := <-bcR.requestsCh:
|
||||
peer := bcR.Switch.Peers().Get(request.PeerID)
|
||||
if peer == nil {
|
||||
continue FOR_LOOP // Peer has since been disconnected.
|
||||
}
|
||||
msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
// We couldn't make the request, send-queue full.
|
||||
// The pool handles timeouts, just let it go.
|
||||
continue FOR_LOOP
|
||||
}
|
||||
|
||||
case err := <-bcR.errorsCh:
|
||||
peer := bcR.Switch.Peers().Get(err.peerID)
|
||||
if peer != nil {
|
||||
bcR.Switch.StopPeerForError(peer, err)
|
||||
}
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest() // nolint: errcheck
|
||||
|
||||
case <-switchToConsensusTicker.C:
|
||||
height, numPending, lenRequesters := bcR.pool.GetStatus()
|
||||
outbound, inbound, _ := bcR.Switch.NumPeers()
|
||||
@@ -263,13 +266,13 @@ FOR_LOOP:
|
||||
if bcR.pool.IsCaughtUp() {
|
||||
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
|
||||
bcR.pool.Stop()
|
||||
|
||||
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
|
||||
if ok {
|
||||
conR.SwitchToConsensus(state, blocksSynced)
|
||||
} else {
|
||||
// should only happen during testing
|
||||
}
|
||||
// else {
|
||||
// should only happen during testing
|
||||
// }
|
||||
|
||||
break FOR_LOOP
|
||||
}
|
||||
@@ -302,7 +305,7 @@ FOR_LOOP:

 			firstParts := first.MakePartSet(types.BlockPartSizeBytes)
 			firstPartsHeader := firstParts.Header()
-			firstID := types.BlockID{first.Hash(), firstPartsHeader}
+			firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
 			// Finally, verify the first block using the second's commit
 			// NOTE: we can probably make this more efficient, but note that calling
 			// first.Hash() doesn't verify the tx contents, so MakePartSet() is
@@ -316,14 +319,14 @@ FOR_LOOP:
 				if peer != nil {
 					// NOTE: we've already removed the peer's request, but we
 					// still need to clean up the rest.
-					bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
+					bcR.Switch.StopPeerForError(peer, fmt.Errorf("blockchainReactor validation error: %v", err))
 				}
 				peerID2 := bcR.pool.RedoRequest(second.Height)
 				peer2 := bcR.Switch.Peers().Get(peerID2)
 				if peer2 != nil && peer2 != peer {
 					// NOTE: we've already removed the peer's request, but we
 					// still need to clean up the rest.
-					bcR.Switch.StopPeerForError(peer2, fmt.Errorf("BlockchainReactor validation error: %v", err))
+					bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err))
 				}
 				continue FOR_LOOP
 			} else {
@@ -338,8 +341,7 @@ FOR_LOOP:
 				state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
 				if err != nil {
 					// TODO This is bad, are we zombie?
-					cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
-						first.Height, first.Hash(), err))
+					panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
 				}
 				blocksSynced++

@@ -373,6 +375,7 @@ type BlockchainMessage interface {
 	ValidateBasic() error
 }

+// RegisterBlockchainMessages registers the fast sync messages for amino encoding.
 func RegisterBlockchainMessages(cdc *amino.Codec) {
 	cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
 	cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
@@ -384,7 +387,7 @@ func RegisterBlockchainMessages(cdc *amino.Codec) {

 func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
 	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
+		return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
 	}
 	err = cdc.UnmarshalBinaryBare(bz, &msg)
 	return
@@ -399,7 +402,7 @@ type bcBlockRequestMessage struct {

 // ValidateBasic performs basic validation.
 func (m *bcBlockRequestMessage) ValidateBasic() error {
 	if m.Height < 0 {
-		return errors.New("Negative Height")
+		return errors.New("negative Height")
 	}
 	return nil
 }
@@ -415,13 +418,13 @@ type bcNoBlockResponseMessage struct {
 // ValidateBasic performs basic validation.
 func (m *bcNoBlockResponseMessage) ValidateBasic() error {
 	if m.Height < 0 {
-		return errors.New("Negative Height")
+		return errors.New("negative Height")
 	}
 	return nil
 }

-func (brm *bcNoBlockResponseMessage) String() string {
-	return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
+func (m *bcNoBlockResponseMessage) String() string {
+	return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height)
 }

 //-------------------------------------
@@ -448,7 +451,7 @@ type bcStatusRequestMessage struct {
 // ValidateBasic performs basic validation.
 func (m *bcStatusRequestMessage) ValidateBasic() error {
 	if m.Height < 0 {
-		return errors.New("Negative Height")
+		return errors.New("negative Height")
 	}
 	return nil
 }
@@ -466,7 +469,7 @@ type bcStatusResponseMessage struct {
 // ValidateBasic performs basic validation.
 func (m *bcStatusResponseMessage) ValidateBasic() error {
 	if m.Height < 0 {
-		return errors.New("Negative Height")
+		return errors.New("negative Height")
 	}
 	return nil
 }
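Taken together, RegisterBlockchainMessages and decodeMsg pin down the wire format for these fast sync messages. A hypothetical round-trip from inside the package, assuming the package-level cdc codec registered as in codec.go below (the height value is arbitrary):

// Encode a request with the package codec, then decode and validate it.
bz := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{Height: 5})

msg, err := decodeMsg(bz) // errors out if len(bz) > maxMsgSize
if err != nil {
	panic(err)
}
if err := msg.ValidateBasic(); err != nil { // e.g. rejects negative heights
	panic(err)
}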
@@ -1,22 +1,26 @@
-package blockchain
+package v0

 import (
+	"os"
 	"sort"
 	"testing"
 	"time"

+	"github.com/pkg/errors"
+	"github.com/tendermint/tendermint/store"
+
 	"github.com/stretchr/testify/assert"

 	abci "github.com/tendermint/tendermint/abci/types"
 	cfg "github.com/tendermint/tendermint/config"
-	cmn "github.com/tendermint/tendermint/libs/common"
-	dbm "github.com/tendermint/tendermint/libs/db"
 	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/mock"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/proxy"
 	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
-	tmtime "github.com/tendermint/tendermint/types/time"
+	dbm "github.com/tendermint/tm-db"
 )

 var config *cfg.Config
@@ -41,30 +45,16 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G
 	}, privValidators
 }

-func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
-	addr := privVal.GetPubKey().Address()
-	idx, _ := valset.GetByAddress(addr)
-	vote := &types.Vote{
-		ValidatorAddress: addr,
-		ValidatorIndex:   idx,
-		Height:           header.Height,
-		Round:            1,
-		Timestamp:        tmtime.Now(),
-		Type:             types.PrecommitType,
-		BlockID:          blockID,
-	}
-
-	privVal.SignVote(header.ChainID, vote)
-
-	return vote
-}
-
 type BlockchainReactorPair struct {
 	reactor *BlockchainReactor
 	app     proxy.AppConns
 }

-func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) BlockchainReactorPair {
+func newBlockchainReactor(
+	logger log.Logger,
+	genDoc *types.GenesisDoc,
+	privVals []types.PrivValidator,
+	maxBlockHeight int64) BlockchainReactorPair {
 	if len(privVals) != 1 {
 		panic("only support one validator")
 	}
@@ -74,44 +64,55 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals
 	proxyApp := proxy.NewAppConns(cc)
 	err := proxyApp.Start()
 	if err != nil {
-		panic(cmn.ErrorWrap(err, "error start app"))
+		panic(errors.Wrap(err, "error start app"))
 	}

 	blockDB := dbm.NewMemDB()
 	stateDB := dbm.NewMemDB()
-	blockStore := NewBlockStore(blockDB)
+	blockStore := store.NewBlockStore(blockDB)

 	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
 	if err != nil {
-		panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
+		panic(errors.Wrap(err, "error constructing state from genesis file"))
 	}

 	// Make the BlockchainReactor itself.
 	// NOTE we have to create and commit the blocks first because
 	// pool.height is determined from the store.
 	fastSync := true
-	blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), proxyApp.Consensus(),
-		sm.MockMempool{}, sm.MockEvidencePool{})
+	db := dbm.NewMemDB()
+	blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(),
+		mock.Mempool{}, sm.MockEvidencePool{})
+	sm.SaveState(db, state)

 	// let's add some blocks in
 	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
-		lastCommit := &types.Commit{}
+		lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
 		if blockHeight > 1 {
 			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
 			lastBlock := blockStore.LoadBlock(blockHeight - 1)

-			vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
-			lastCommit = &types.Commit{Precommits: []*types.Vote{vote}, BlockID: lastBlockMeta.BlockID}
+			vote, err := types.MakeVote(
+				lastBlock.Header.Height,
+				lastBlockMeta.BlockID,
+				state.Validators,
+				privVals[0],
+				lastBlock.Header.ChainID)
+			if err != nil {
+				panic(err)
+			}
+			lastCommit = types.NewCommit(vote.Height, vote.Round,
+				lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
 		}

 		thisBlock := makeBlock(blockHeight, state, lastCommit)

 		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
-		blockID := types.BlockID{thisBlock.Hash(), thisParts.Header()}
+		blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()}

 		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
 		if err != nil {
-			panic(cmn.ErrorWrap(err, "error apply block"))
+			panic(errors.Wrap(err, "error apply block"))
 		}

 		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
@@ -125,6 +126,7 @@ func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals

 func TestNoBlockResponse(t *testing.T) {
 	config = cfg.ResetTestRoot("blockchain_reactor_test")
+	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)

 	maxBlockHeight := int64(65)
@@ -184,6 +186,7 @@ func TestNoBlockResponse(t *testing.T) {
 // that seems extreme.
 func TestBadBlockStopsPeer(t *testing.T) {
 	config = cfg.ResetTestRoot("blockchain_reactor_test")
+	defer os.RemoveAll(config.RootDir)
 	genDoc, privVals := randGenesisDoc(1, false, 30)

 	maxBlockHeight := int64(148)
@@ -252,6 +255,86 @@ func TestBadBlockStopsPeer(t *testing.T) {
 	assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
 }

+func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName      string
+		requestHeight int64
+		expectErr     bool
+	}{
+		{"Valid Request Message", 0, false},
+		{"Valid Request Message", 1, false},
+		{"Invalid Request Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.testName, func(t *testing.T) {
+			request := bcBlockRequestMessage{Height: tc.requestHeight}
+			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName          string
+		nonResponseHeight int64
+		expectErr         bool
+	}{
+		{"Valid Non-Response Message", 0, false},
+		{"Valid Non-Response Message", 1, false},
+		{"Invalid Non-Response Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.testName, func(t *testing.T) {
+			nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight}
+			assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestBcStatusRequestMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName      string
+		requestHeight int64
+		expectErr     bool
+	}{
+		{"Valid Request Message", 0, false},
+		{"Valid Request Message", 1, false},
+		{"Invalid Request Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.testName, func(t *testing.T) {
+			request := bcStatusRequestMessage{Height: tc.requestHeight}
+			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}
+
+func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
+	testCases := []struct {
+		testName       string
+		responseHeight int64
+		expectErr      bool
+	}{
+		{"Valid Response Message", 0, false},
+		{"Valid Response Message", 1, false},
+		{"Invalid Response Message", -1, true},
+	}
+
+	for _, tc := range testCases {
+		tc := tc
+		t.Run(tc.testName, func(t *testing.T) {
+			response := bcStatusResponseMessage{Height: tc.responseHeight}
+			assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result")
+		})
+	}
+}

 //----------------------------------------------
 // utility funcs

@@ -285,11 +368,11 @@ func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
 	return abci.ResponseEndBlock{}
 }

-func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx {
-	return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}}
+func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
+	return abci.ResponseDeliverTx{Events: []abci.Event{}}
 }

-func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx {
+func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
 	return abci.ResponseCheckTx{}
 }
blockchain/v1/codec.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package v1

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/types"
)

var cdc = amino.NewCodec()

func init() {
	RegisterBlockchainMessages(cdc)
	types.RegisterBlockAmino(cdc)
}
blockchain/v1/peer.go (new file, 209 lines)
@@ -0,0 +1,209 @@
package v1

import (
	"fmt"
	"math"
	"time"

	flow "github.com/tendermint/tendermint/libs/flowrate"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

//--------
// Peer

// BpPeerParams stores the peer parameters that are used when creating a peer.
type BpPeerParams struct {
	timeout     time.Duration
	minRecvRate int64
	sampleRate  time.Duration
	windowSize  time.Duration
}

// BpPeer is the data structure associated with a fast sync peer.
type BpPeer struct {
	logger log.Logger
	ID     p2p.ID

	Height                  int64                  // the peer reported height
	NumPendingBlockRequests int                    // number of requests still waiting for block responses
	blocks                  map[int64]*types.Block // blocks received or expected to be received from this peer
	blockResponseTimer      *time.Timer
	recvMonitor             *flow.Monitor
	params                  *BpPeerParams // parameters for timer and monitor

	onErr func(err error, peerID p2p.ID) // function to call on error
}

// NewBpPeer creates a new peer.
func NewBpPeer(
	peerID p2p.ID, height int64, onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer {

	if params == nil {
		params = BpPeerDefaultParams()
	}
	return &BpPeer{
		ID:     peerID,
		Height: height,
		blocks: make(map[int64]*types.Block, maxRequestsPerPeer),
		logger: log.NewNopLogger(),
		onErr:  onErr,
		params: params,
	}
}

// String returns a string representation of a peer.
func (peer *BpPeer) String() string {
	return fmt.Sprintf("peer: %v height: %v pending: %v", peer.ID, peer.Height, peer.NumPendingBlockRequests)
}

// SetLogger sets the logger of the peer.
func (peer *BpPeer) SetLogger(l log.Logger) {
	peer.logger = l
}

// Cleanup performs cleanup of the peer, removes blocks, requests, stops timer and monitor.
func (peer *BpPeer) Cleanup() {
	if peer.blockResponseTimer != nil {
		peer.blockResponseTimer.Stop()
	}
	if peer.NumPendingBlockRequests != 0 {
		peer.logger.Info("peer with pending requests is being cleaned", "peer", peer.ID)
	}
	if len(peer.blocks)-peer.NumPendingBlockRequests != 0 {
		peer.logger.Info("peer with pending blocks is being cleaned", "peer", peer.ID)
	}
	for h := range peer.blocks {
		delete(peer.blocks, h)
	}
	peer.NumPendingBlockRequests = 0
	peer.recvMonitor = nil
}

// BlockAtHeight returns the block at a given height if available and errMissingBlock otherwise.
func (peer *BpPeer) BlockAtHeight(height int64) (*types.Block, error) {
	block, ok := peer.blocks[height]
	if !ok {
		return nil, errMissingBlock
	}
	if block == nil {
		return nil, errMissingBlock
	}
	return peer.blocks[height], nil
}

// AddBlock adds a block at peer level. Block must be non-nil and recvSize a positive integer.
// The peer must have a pending request for this block.
func (peer *BpPeer) AddBlock(block *types.Block, recvSize int) error {
	if block == nil || recvSize < 0 {
		panic("bad parameters")
	}
	existingBlock, ok := peer.blocks[block.Height]
	if !ok {
		peer.logger.Error("unsolicited block", "blockHeight", block.Height, "peer", peer.ID)
		return errMissingBlock
	}
	if existingBlock != nil {
		peer.logger.Error("already have a block for height", "height", block.Height)
		return errDuplicateBlock
	}
	if peer.NumPendingBlockRequests == 0 {
		panic("peer does not have pending requests")
	}
	peer.blocks[block.Height] = block
	peer.NumPendingBlockRequests--
	if peer.NumPendingBlockRequests == 0 {
		peer.stopMonitor()
		peer.stopBlockResponseTimer()
	} else {
		peer.recvMonitor.Update(recvSize)
		peer.resetBlockResponseTimer()
	}
	return nil
}

// RemoveBlock removes the block of given height.
func (peer *BpPeer) RemoveBlock(height int64) {
	delete(peer.blocks, height)
}

// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
func (peer *BpPeer) RequestSent(height int64) {
	peer.blocks[height] = nil

	if peer.NumPendingBlockRequests == 0 {
		peer.startMonitor()
		peer.resetBlockResponseTimer()
	}
	peer.NumPendingBlockRequests++
}

// CheckRate verifies that the response rate of the peer is acceptable (higher than the minimum allowed).
func (peer *BpPeer) CheckRate() error {
	if peer.NumPendingBlockRequests == 0 {
		return nil
	}
	curRate := peer.recvMonitor.Status().CurRate
	// curRate can be 0 on start
	if curRate != 0 && curRate < peer.params.minRecvRate {
		err := errSlowPeer
		peer.logger.Error("SendTimeout", "peer", peer,
			"reason", err,
			"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
			"minRate", fmt.Sprintf("%d KB/s", peer.params.minRecvRate/1024))
		return err
	}
	return nil
}

func (peer *BpPeer) onTimeout() {
	peer.onErr(errNoPeerResponse, peer.ID)
}

func (peer *BpPeer) stopMonitor() {
	peer.recvMonitor.Done()
	peer.recvMonitor = nil
}

func (peer *BpPeer) startMonitor() {
	peer.recvMonitor = flow.New(peer.params.sampleRate, peer.params.windowSize)
	initialValue := float64(peer.params.minRecvRate) * math.E
	peer.recvMonitor.SetREMA(initialValue)
}

func (peer *BpPeer) resetBlockResponseTimer() {
	if peer.blockResponseTimer == nil {
		peer.blockResponseTimer = time.AfterFunc(peer.params.timeout, peer.onTimeout)
	} else {
		peer.blockResponseTimer.Reset(peer.params.timeout)
	}
}

func (peer *BpPeer) stopBlockResponseTimer() bool {
	if peer.blockResponseTimer == nil {
		return false
	}
	return peer.blockResponseTimer.Stop()
}

// BpPeerDefaultParams returns the default peer parameters.
func BpPeerDefaultParams() *BpPeerParams {
	return &BpPeerParams{
		// Timeout for a peer to respond to a block request.
		timeout: 15 * time.Second,

		// Minimum recv rate to ensure we're receiving blocks from a peer fast
		// enough. If a peer is not sending data at least at that rate, we
		// consider them to have timed out and we disconnect.
		//
		// Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s,
		// sending data across atlantic ~ 7.5 KB/s.
		minRecvRate: int64(7680),

		// Monitor parameters
		sampleRate: time.Second,
		windowSize: 40 * time.Second,
	}
}
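To make the timer and monitor life cycle above concrete, here is a hypothetical sketch (not in the repo) of driving a BpPeer by hand from inside package v1; in the reactor this is done by the BlockPool rather than directly, and the peer ID and sizes below are made up:

func sketchBpPeerLifecycle() {
	onErr := func(err error, peerID p2p.ID) {
		fmt.Println("peer error:", peerID, err) // errNoPeerResponse on timeout
	}
	peer := NewBpPeer(p2p.ID("peer1"), 100, onErr, nil) // nil => BpPeerDefaultParams()

	peer.RequestSent(10) // first request starts the recv monitor and response timer
	peer.RequestSent(11)

	// A delivered block decrements the pending count; at zero the timer and
	// monitor stop, otherwise the timer is reset for the next response.
	block := types.MakeBlock(10, []types.Tx{types.Tx("foo")}, nil, nil)
	_ = peer.AddBlock(block, 1024)

	peer.Cleanup() // drops remaining blocks and stops the timer
}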
blockchain/v1/peer_test.go (new file, 280 lines)
@@ -0,0 +1,280 @@
package v1

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/libs/log"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

func TestPeerMonitor(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 10,
		func(err error, _ p2p.ID) {},
		nil)
	peer.SetLogger(log.TestingLogger())
	peer.startMonitor()
	assert.NotNil(t, peer.recvMonitor)
	peer.stopMonitor()
	assert.Nil(t, peer.recvMonitor)
}

func TestPeerResetBlockResponseTimer(t *testing.T) {
	var (
		numErrFuncCalls int        // number of calls to the errFunc
		lastErr         error      // last generated error
		peerTestMtx     sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine
	)
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 10,
		func(err error, _ p2p.ID) {
			peerTestMtx.Lock()
			defer peerTestMtx.Unlock()
			lastErr = err
			numErrFuncCalls++
		},
		params)

	peer.SetLogger(log.TestingLogger())
	checkByStoppingPeerTimer(t, peer, false)

	// initial reset call with peer having a nil timer
	peer.resetBlockResponseTimer()
	assert.NotNil(t, peer.blockResponseTimer)
	// make sure timer is running and stop it
	checkByStoppingPeerTimer(t, peer, true)

	// reset with running timer
	peer.resetBlockResponseTimer()
	time.Sleep(time.Millisecond)
	peer.resetBlockResponseTimer()
	assert.NotNil(t, peer.blockResponseTimer)

	// let the timer expire and ...
	time.Sleep(3 * time.Millisecond)
	// ... check timer is not running
	checkByStoppingPeerTimer(t, peer, false)

	peerTestMtx.Lock()
	// ... check errNoPeerResponse has been sent
	assert.Equal(t, 1, numErrFuncCalls)
	assert.Equal(t, lastErr, errNoPeerResponse)
	peerTestMtx.Unlock()
}

func TestPeerRequestSent(t *testing.T) {
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 10,
		func(err error, _ p2p.ID) {},
		params)

	peer.SetLogger(log.TestingLogger())

	peer.RequestSent(1)
	assert.NotNil(t, peer.recvMonitor)
	assert.NotNil(t, peer.blockResponseTimer)
	assert.Equal(t, 1, peer.NumPendingBlockRequests)

	peer.RequestSent(1)
	assert.NotNil(t, peer.recvMonitor)
	assert.NotNil(t, peer.blockResponseTimer)
	assert.Equal(t, 2, peer.NumPendingBlockRequests)
}

func TestPeerGetAndRemoveBlock(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 100,
		func(err error, _ p2p.ID) {},
		nil)

	// Change peer height
	peer.Height = int64(10)
	assert.Equal(t, int64(10), peer.Height)

	// request some blocks and receive few of them
	for i := 1; i <= 10; i++ {
		peer.RequestSent(int64(i))
		if i > 5 {
			// only receive blocks 1..5
			continue
		}
		_ = peer.AddBlock(makeSmallBlock(i), 10)
	}

	tests := []struct {
		name         string
		height       int64
		wantErr      error
		blockPresent bool
	}{
		{"no request", 100, errMissingBlock, false},
		{"no block", 6, errMissingBlock, false},
		{"block 1 present", 1, nil, true},
		{"block max present", 5, nil, true},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// try to get the block
			b, err := peer.BlockAtHeight(tt.height)
			assert.Equal(t, tt.wantErr, err)
			assert.Equal(t, tt.blockPresent, b != nil)

			// remove the block
			peer.RemoveBlock(tt.height)
			_, err = peer.BlockAtHeight(tt.height)
			assert.Equal(t, errMissingBlock, err)
		})
	}
}

func TestPeerAddBlock(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 100,
		func(err error, _ p2p.ID) {},
		nil)

	// request some blocks, receive one
	for i := 1; i <= 10; i++ {
		peer.RequestSent(int64(i))
		if i == 5 {
			// receive block 5
			_ = peer.AddBlock(makeSmallBlock(i), 10)
		}
	}

	tests := []struct {
		name         string
		height       int64
		wantErr      error
		blockPresent bool
	}{
		{"no request", 50, errMissingBlock, false},
		{"duplicate block", 5, errDuplicateBlock, true},
		{"block 1 successfully received", 1, nil, true},
		{"block max successfully received", 10, nil, true},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// try to get the block
			err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10)
			assert.Equal(t, tt.wantErr, err)
			_, err = peer.BlockAtHeight(tt.height)
			assert.Equal(t, tt.blockPresent, err == nil)
		})
	}
}

func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) {
	params := &BpPeerParams{timeout: 2 * time.Millisecond}
	var (
		numErrFuncCalls int        // number of calls to the onErr function
		lastErr         error      // last generated error
		peerTestMtx     sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine
	)

	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 10,
		func(err error, _ p2p.ID) {
			peerTestMtx.Lock()
			defer peerTestMtx.Unlock()
			lastErr = err
			numErrFuncCalls++
		},
		params)

	peer.SetLogger(log.TestingLogger())

	peer.RequestSent(1)
	time.Sleep(4 * time.Millisecond)
	// timer should have expired by now, check that the on error function was called
	peerTestMtx.Lock()
	assert.Equal(t, 1, numErrFuncCalls)
	assert.Equal(t, errNoPeerResponse, lastErr)
	peerTestMtx.Unlock()
}

func TestPeerCheckRate(t *testing.T) {
	params := &BpPeerParams{
		timeout:     time.Second,
		minRecvRate: int64(100), // 100 bytes/sec exponential moving average
	}
	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 10,
		func(err error, _ p2p.ID) {},
		params)
	peer.SetLogger(log.TestingLogger())

	require.Nil(t, peer.CheckRate())

	for i := 0; i < 40; i++ {
		peer.RequestSent(int64(i))
	}

	// monitor starts with a higher rEMA (~ 2*minRecvRate), wait for it to go down
	time.Sleep(900 * time.Millisecond)

	// normal peer - send a bit more than 100 bytes/sec, > 10 bytes/100msec, check peer is not considered slow
	for i := 0; i < 10; i++ {
		_ = peer.AddBlock(makeSmallBlock(i), 11)
		time.Sleep(100 * time.Millisecond)
		require.Nil(t, peer.CheckRate())
	}

	// slow peer - send a bit less than 10 bytes/100msec
	for i := 10; i < 20; i++ {
		_ = peer.AddBlock(makeSmallBlock(i), 9)
		time.Sleep(100 * time.Millisecond)
	}
	// check peer is considered slow
	assert.Equal(t, errSlowPeer, peer.CheckRate())
}

func TestPeerCleanup(t *testing.T) {
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(tmrand.Str(12)), 10,
		func(err error, _ p2p.ID) {},
		params)
	peer.SetLogger(log.TestingLogger())

	assert.Nil(t, peer.blockResponseTimer)
	peer.RequestSent(1)
	assert.NotNil(t, peer.blockResponseTimer)

	peer.Cleanup()
	checkByStoppingPeerTimer(t, peer, false)
}

// Check if peer timer is running or not (a running timer can be successfully stopped).
// Note: stops the timer.
func checkByStoppingPeerTimer(t *testing.T, peer *BpPeer, running bool) {
	assert.NotPanics(t, func() {
		stopped := peer.stopBlockResponseTimer()
		if running {
			assert.True(t, stopped)
		} else {
			assert.False(t, stopped)
		}
	})
}

func makeSmallBlock(height int) *types.Block {
	return types.MakeBlock(int64(height), []types.Tx{types.Tx("foo")}, nil, nil)
}
blockchain/v1/pool.go (new file, 369 lines)
@@ -0,0 +1,369 @@
package v1

import (
	"sort"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
	logger log.Logger
	// Set of peers that have sent status responses, with height bigger than pool.Height
	peers map[p2p.ID]*BpPeer
	// Set of block heights and the corresponding peers from where a block response is expected or has been received.
	blocks map[int64]p2p.ID

	plannedRequests   map[int64]struct{} // list of blocks to be assigned peers for blockRequest
	nextRequestHeight int64              // next height to be added to plannedRequests

	Height        int64 // height of next block to execute
	MaxPeerHeight int64 // maximum height of all peers
	toBcR         bcReactor
}

// NewBlockPool creates a new BlockPool.
func NewBlockPool(height int64, toBcR bcReactor) *BlockPool {
	return &BlockPool{
		Height:            height,
		MaxPeerHeight:     0,
		peers:             make(map[p2p.ID]*BpPeer),
		blocks:            make(map[int64]p2p.ID),
		plannedRequests:   make(map[int64]struct{}),
		nextRequestHeight: height,
		toBcR:             toBcR,
	}
}

// SetLogger sets the logger of the pool.
func (pool *BlockPool) SetLogger(l log.Logger) {
	pool.logger = l
}

// ReachedMaxHeight checks if the pool has reached the maximum peer height.
func (pool *BlockPool) ReachedMaxHeight() bool {
	return pool.Height >= pool.MaxPeerHeight
}

func (pool *BlockPool) rescheduleRequest(peerID p2p.ID, height int64) {
	pool.logger.Info("reschedule requests made to peer for height ", "peerID", peerID, "height", height)
	pool.plannedRequests[height] = struct{}{}
	delete(pool.blocks, height)
	pool.peers[peerID].RemoveBlock(height)
}

// Updates the pool's max height. If no peers are left MaxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
	var newMax int64
	for _, peer := range pool.peers {
		peerHeight := peer.Height
		if peerHeight > newMax {
			newMax = peerHeight
		}
	}
	pool.MaxPeerHeight = newMax
}

// UpdatePeer adds a new peer or updates an existing peer with a new height.
// If a peer is short it is not added.
func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error {

	peer := pool.peers[peerID]

	if peer == nil {
		if height < pool.Height {
			pool.logger.Info("Peer height too small",
				"peer", peerID, "height", height, "fsm_height", pool.Height)
			return errPeerTooShort
		}
		// Add new peer.
		peer = NewBpPeer(peerID, height, pool.toBcR.sendPeerError, nil)
		peer.SetLogger(pool.logger.With("peer", peerID))
		pool.peers[peerID] = peer
		pool.logger.Info("added peer", "peerID", peerID, "height", height, "num_peers", len(pool.peers))
	} else {
		// Check if peer is lowering its height. This is not allowed.
		if height < peer.Height {
			pool.RemovePeer(peerID, errPeerLowersItsHeight)
			return errPeerLowersItsHeight
		}
		// Update existing peer.
		peer.Height = height
	}

	// Update the pool's MaxPeerHeight if needed.
	pool.updateMaxPeerHeight()

	return nil
}

// Cleans and deletes the peer. Recomputes the max peer height.
func (pool *BlockPool) deletePeer(peer *BpPeer) {
	if peer == nil {
		return
	}
	peer.Cleanup()
	delete(pool.peers, peer.ID)

	if peer.Height == pool.MaxPeerHeight {
		pool.updateMaxPeerHeight()
	}
}

// RemovePeer removes the blocks and requests from the peer, reschedules them and deletes the peer.
func (pool *BlockPool) RemovePeer(peerID p2p.ID, err error) {
	peer := pool.peers[peerID]
	if peer == nil {
		return
	}
	pool.logger.Info("removing peer", "peerID", peerID, "error", err)

	// Reschedule the block requests made to the peer, or received and not processed yet.
	// Note that some of the requests may be removed further down.
	for h := range pool.peers[peerID].blocks {
		pool.rescheduleRequest(peerID, h)
	}

	oldMaxPeerHeight := pool.MaxPeerHeight
	// Delete the peer. This operation may result in the pool's MaxPeerHeight being lowered.
	pool.deletePeer(peer)

	// Check if the pool's MaxPeerHeight has been lowered.
	// This may happen if the tallest peer has been removed.
	if oldMaxPeerHeight > pool.MaxPeerHeight {
		// Remove any planned requests for heights over the new MaxPeerHeight.
		for h := range pool.plannedRequests {
			if h > pool.MaxPeerHeight {
				delete(pool.plannedRequests, h)
			}
		}
		// Adjust the nextRequestHeight to the new max plus one.
		if pool.nextRequestHeight > pool.MaxPeerHeight {
			pool.nextRequestHeight = pool.MaxPeerHeight + 1
		}
	}
}

func (pool *BlockPool) removeShortPeers() {
	for _, peer := range pool.peers {
		if peer.Height < pool.Height {
			pool.RemovePeer(peer.ID, nil)
		}
	}
}

func (pool *BlockPool) removeBadPeers() {
	pool.removeShortPeers()
	for _, peer := range pool.peers {
		if err := peer.CheckRate(); err != nil {
			pool.RemovePeer(peer.ID, err)
			pool.toBcR.sendPeerError(err, peer.ID)
		}
	}
}

// MakeNextRequests creates more requests if the block pool is running low.
func (pool *BlockPool) MakeNextRequests(maxNumRequests int) {
	heights := pool.makeRequestBatch(maxNumRequests)
	if len(heights) != 0 {
		pool.logger.Info("makeNextRequests will make following requests",
			"number", len(heights), "heights", heights)
	}

	for _, height := range heights {
		h := int64(height)
		if !pool.sendRequest(h) {
			// If a good peer was not found for sending the request at height h then return,
			// as it shouldn't be possible to find a peer for h+1.
			return
		}
		delete(pool.plannedRequests, h)
	}
}

// Makes a batch of requests sorted by height such that the block pool has up to maxNumRequests entries.
func (pool *BlockPool) makeRequestBatch(maxNumRequests int) []int {
	pool.removeBadPeers()
	// At this point pool.requests may include heights for requests to be redone due to removal of peers:
	// - peers timed out or were removed by switch
	// - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1
	// Determine the number of requests needed by subtracting the number of requests already made from the maximum
	// allowed.
	numNeeded := maxNumRequests - len(pool.blocks)
	for len(pool.plannedRequests) < numNeeded {
		if pool.nextRequestHeight > pool.MaxPeerHeight {
			break
		}
		pool.plannedRequests[pool.nextRequestHeight] = struct{}{}
		pool.nextRequestHeight++
	}

	heights := make([]int, 0, len(pool.plannedRequests))
	for k := range pool.plannedRequests {
		heights = append(heights, int(k))
	}
	sort.Ints(heights)
	return heights
}

func (pool *BlockPool) sendRequest(height int64) bool {
	for _, peer := range pool.peers {
		if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
			continue
		}
		if peer.Height < height {
			continue
		}

		err := pool.toBcR.sendBlockRequest(peer.ID, height)
		if err == errNilPeerForBlockRequest {
			// Switch does not have this peer, remove it and continue to look for another peer.
			pool.logger.Error("switch does not have peer..removing peer selected for height", "peer",
				peer.ID, "height", height)
			pool.RemovePeer(peer.ID, err)
			continue
		}

		if err == errSendQueueFull {
			pool.logger.Error("peer queue is full", "peer", peer.ID, "height", height)
			continue
		}

		pool.logger.Info("assigned request to peer", "peer", peer.ID, "height", height)

		pool.blocks[height] = peer.ID
		peer.RequestSent(height)

		return true
	}
	pool.logger.Error("could not find peer to send request for block at height", "height", height)
	return false
}

// AddBlock validates that the block comes from the peer it was expected from and stores it in the 'blocks' map.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) error {
	peer, ok := pool.peers[peerID]
	if !ok {
		pool.logger.Error("block from unknown peer", "height", block.Height, "peer", peerID)
		return errBadDataFromPeer
	}
	if wantPeerID, ok := pool.blocks[block.Height]; ok && wantPeerID != peerID {
		pool.logger.Error("block received from wrong peer", "height", block.Height,
			"peer", peerID, "expected_peer", wantPeerID)
		return errBadDataFromPeer
	}

	return peer.AddBlock(block, blockSize)
}

// BlockData stores the peer responsible to deliver a block and the actual block if delivered.
type BlockData struct {
	block *types.Block
	peer  *BpPeer
}

// BlockAndPeerAtHeight retrieves the block and delivery peer at specified height.
// Returns errMissingBlock if a block was not found.
func (pool *BlockPool) BlockAndPeerAtHeight(height int64) (bData *BlockData, err error) {
	peerID := pool.blocks[height]
	peer := pool.peers[peerID]
	if peer == nil {
		return nil, errMissingBlock
	}

	block, err := peer.BlockAtHeight(height)
	if err != nil {
		return nil, err
	}

	return &BlockData{peer: peer, block: block}, nil
}

// FirstTwoBlocksAndPeers returns the blocks and the delivery peers at pool's height H and H+1.
func (pool *BlockPool) FirstTwoBlocksAndPeers() (first, second *BlockData, err error) {
	first, err = pool.BlockAndPeerAtHeight(pool.Height)
	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
	if err == nil {
		err = err2
	}
	return
}

// InvalidateFirstTwoBlocks removes the peers that sent us the first two blocks, blocks are removed by RemovePeer().
func (pool *BlockPool) InvalidateFirstTwoBlocks(err error) {
	first, err1 := pool.BlockAndPeerAtHeight(pool.Height)
	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)

	if err1 == nil {
		pool.RemovePeer(first.peer.ID, err)
	}
	if err2 == nil {
		pool.RemovePeer(second.peer.ID, err)
	}
}

// ProcessedCurrentHeightBlock performs cleanup after a block is processed. It removes block at pool height and
// the peers that are now short.
func (pool *BlockPool) ProcessedCurrentHeightBlock() {
	peerID, peerOk := pool.blocks[pool.Height]
	if peerOk {
		pool.peers[peerID].RemoveBlock(pool.Height)
	}
	delete(pool.blocks, pool.Height)
	pool.logger.Debug("removed block at height", "height", pool.Height)
	pool.Height++
	pool.removeShortPeers()
}

// RemovePeerAtCurrentHeights checks if a block at pool's height H exists and if not, it removes the
// delivery peer and returns. If a block at height H exists then the check and peer removal is done for H+1.
// This function is called when the FSM is not able to make progress for some time.
// This happens if either the block H or H+1 has not been delivered.
func (pool *BlockPool) RemovePeerAtCurrentHeights(err error) {
	peerID := pool.blocks[pool.Height]
	peer, ok := pool.peers[peerID]
	if ok {
		if _, err := peer.BlockAtHeight(pool.Height); err != nil {
			pool.logger.Info("remove peer that hasn't sent block at pool.Height",
				"peer", peerID, "height", pool.Height)
			pool.RemovePeer(peerID, err)
			return
		}
	}
	peerID = pool.blocks[pool.Height+1]
	peer, ok = pool.peers[peerID]
	if ok {
		if _, err := peer.BlockAtHeight(pool.Height + 1); err != nil {
			pool.logger.Info("remove peer that hasn't sent block at pool.Height+1",
				"peer", peerID, "height", pool.Height+1)
			pool.RemovePeer(peerID, err)
			return
		}
	}
}

// Cleanup performs pool and peer cleanup.
func (pool *BlockPool) Cleanup() {
	for id, peer := range pool.peers {
		peer.Cleanup()
		delete(pool.peers, id)
	}
	pool.plannedRequests = make(map[int64]struct{})
	pool.blocks = make(map[int64]p2p.ID)
	pool.nextRequestHeight = 0
	pool.Height = 0
	pool.MaxPeerHeight = 0
}

// NumPeers returns the number of peers in the pool.
func (pool *BlockPool) NumPeers() int {
	return len(pool.peers)
}

// NeedsBlocks returns true if more blocks are required.
func (pool *BlockPool) NeedsBlocks() bool {
	return len(pool.blocks) < maxNumRequests
}
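The pool above is driven through a small bcReactor callback interface (sendBlockRequest, sendPeerError, and so on), which the test file below stubs out. A hypothetical sketch (not in the repo) of the request flow, assuming a stub like the testBcR that pool_test.go defines:

func sketchBlockPoolFlow() {
	pool := NewBlockPool(1, newTestBcR()) // start requesting from height 1
	pool.SetLogger(log.TestingLogger())

	// A status response from a taller peer registers it and raises MaxPeerHeight.
	_ = pool.UpdatePeer(p2p.ID("P1"), 100)

	// Plan and send block requests; each one lands on a peer with spare
	// request slots via the reactor's sendBlockRequest callback.
	pool.MakeNextRequests(10)

	// Once the blocks at Height and Height+1 are delivered and verified, the
	// reactor calls ProcessedCurrentHeightBlock() to advance pool.Height.
}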
671
blockchain/v1/pool_test.go
Normal file
671
blockchain/v1/pool_test.go
Normal file
@@ -0,0 +1,671 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type testPeer struct {
|
||||
id p2p.ID
|
||||
height int64
|
||||
}
|
||||
|
||||
type testBcR struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
type testValues struct {
|
||||
numRequestsSent int
|
||||
}
|
||||
|
||||
var testResults testValues
|
||||
|
||||
func resetPoolTestResults() {
|
||||
testResults.numRequestsSent = 0
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendPeerError(err error, peerID p2p.ID) {
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendStatusRequest() {
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
testResults.numRequestsSent++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (testR *testBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
|
||||
}
|
||||
|
||||
func (testR *testBcR) switchToConsensus() {
|
||||
|
||||
}
|
||||
|
||||
func newTestBcR() *testBcR {
|
||||
testBcR := &testBcR{logger: log.TestingLogger()}
|
||||
return testBcR
|
||||
}
|
||||
|
||||
type tPBlocks struct {
|
||||
id p2p.ID
|
||||
create bool
|
||||
}
|
||||
|
||||
// Makes a block pool with specified current height, list of peers, block requests and block responses
|
||||
func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64]tPBlocks) *BlockPool {
|
||||
bPool := NewBlockPool(height, bcr)
|
||||
bPool.SetLogger(bcr.logger)
|
||||
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
var maxH int64
|
||||
for _, p := range peers {
|
||||
if p.Height > maxH {
|
||||
maxH = p.Height
|
||||
}
|
||||
bPool.peers[p.ID] = NewBpPeer(p.ID, p.Height, bcr.sendPeerError, nil)
|
||||
bPool.peers[p.ID].SetLogger(bcr.logger)
|
||||
|
||||
}
|
||||
bPool.MaxPeerHeight = maxH
|
||||
for h, p := range blocks {
|
||||
bPool.blocks[h] = p.id
|
||||
bPool.peers[p.id].RequestSent(h)
|
||||
if p.create {
|
||||
// simulate that a block at height h has been received
|
||||
_ = bPool.peers[p.id].AddBlock(types.MakeBlock(h, txs, nil, nil), 100)
|
||||
}
|
||||
}
|
||||
return bPool
|
||||
}
|
||||
|
||||
func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2p.ID]*BpPeer) {
|
||||
assert.Equal(t, len(set1), len(set2))
|
||||
for peerID, peer1 := range set1 {
|
||||
peer2 := set2[peerID]
|
||||
assert.NotNil(t, peer2)
|
||||
assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests)
|
||||
assert.Equal(t, peer1.Height, peer2.Height)
|
||||
assert.Equal(t, len(peer1.blocks), len(peer2.blocks))
|
||||
for h, block1 := range peer1.blocks {
|
||||
block2 := peer2.blocks[h]
|
||||
// block1 and block2 could be nil if a request was made but no block was received
|
||||
assert.Equal(t, block1, block2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assertBlockPoolEquivalent(t *testing.T, poolWanted, pool *BlockPool) {
|
||||
assert.Equal(t, poolWanted.blocks, pool.blocks)
|
||||
assertPeerSetsEquivalent(t, poolWanted.peers, pool.peers)
|
||||
assert.Equal(t, poolWanted.MaxPeerHeight, pool.MaxPeerHeight)
|
||||
assert.Equal(t, poolWanted.Height, pool.Height)
|
||||
|
||||
}
|
||||
|
||||
func TestBlockPoolUpdatePeer(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args testPeer
|
||||
poolWanted *BlockPool
|
||||
errWanted error
|
||||
}{
|
||||
{
|
||||
name: "add a first short peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 50},
|
||||
errWanted: errPeerTooShort,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "add a first good peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 101},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "increase the height of P1 from 120 to 123",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 123},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "decrease the height of P1 from 120 to 110",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 110},
|
||||
errWanted: errPeerLowersItsHeight,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "decrease the height of P1 from 105 to 102 with blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}),
|
||||
args: testPeer{"P1", 102},
|
||||
errWanted: errPeerLowersItsHeight,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{},
|
||||
map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
err := pool.UpdatePeer(tt.args.id, tt.args.height)
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks)
|
||||
assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers)
|
||||
assert.Equal(t, tt.poolWanted.MaxPeerHeight, tt.pool.MaxPeerHeight)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemovePeer(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args args
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "attempt to delete non-existing peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P99", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer without blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers without blocks",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
|
||||
map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers without blocks",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
|
||||
map[int64]tPBlocks{}),
|
||||
args: args{"P2", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 200}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 200}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 110}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 110}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.RemovePeer(tt.args.peerID, tt.args.err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemoveShortPeers(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "no short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "one short peer",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 90}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "all short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 90}, {ID: "P2", Height: 91}, {ID: "P3", Height: 92}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
pool.removeShortPeers()
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolSendRequestBatch(t *testing.T) {
|
||||
type testPeerResult struct {
|
||||
id p2p.ID
|
||||
numPendingBlockRequests int
|
||||
}
|
||||
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
maxRequestsPerPeer int
|
||||
expRequests map[int64]bool
|
||||
expPeerResults []testPeerResult
|
||||
expnumPendingBlockRequests int
|
||||
}{
|
||||
{
|
||||
name: "one peer - send up to maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}},
|
||||
expnumPendingBlockRequests: 2,
|
||||
},
|
||||
{
|
||||
name: "n peers - send n*maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expPeerResults: []testPeerResult{
|
||||
{id: "P1", numPendingBlockRequests: 2},
|
||||
{id: "P2", numPendingBlockRequests: 2}},
|
||||
expnumPendingBlockRequests: 4,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
resetPoolTestResults()
|
||||
|
||||
var pool = tt.pool
|
||||
maxRequestsPerPeer = tt.maxRequestsPerPeer
|
||||
pool.MakeNextRequests(10)
|
||||
assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers))
|
||||
|
||||
for _, tPeer := range tt.expPeerResults {
|
||||
var peer = pool.peers[tPeer.id]
|
||||
assert.NotNil(t, peer)
|
||||
assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests)
|
||||
}
|
||||
assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers))
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolAddBlock(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
block *types.Block
|
||||
blockSize int
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args args
|
||||
poolWanted *BlockPool
|
||||
errWanted error
|
||||
}{
|
||||
{name: "block from unknown peer",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
args: args{
|
||||
peerID: "P2",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
errWanted: errBadDataFromPeer,
|
||||
},
|
||||
{name: "unexpected block 11 from known peer - waiting for 10",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(11), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{name: "unexpected block 10 from known peer - already have 10",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
|
||||
errWanted: errDuplicateBlock,
|
||||
},
|
||||
{name: "unexpected block 10 from known peer P2 - expected 10 to come from P1",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P2",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
errWanted: errBadDataFromPeer,
|
||||
},
|
||||
{name: "expected block from known peer",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}}),
|
||||
errWanted: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize)
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name         string
		pool         *BlockPool
		firstWanted  int64
		secondWanted int64
		errWanted    error
	}{
		{
			name: "both blocks missing",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
			errWanted: errMissingBlock,
		},
		{
			name: "second block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
			firstWanted: 15,
			errWanted:   errMissingBlock,
		},
		{
			name: "first block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{16: {"P2", true}, 18: {"P2", true}}),
			secondWanted: 16,
			errWanted:    errMissingBlock,
		},
		{
			name: "both blocks present",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
			firstWanted:  10,
			secondWanted: 11,
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			pool := tt.pool
			gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers()
			assert.Equal(t, tt.errWanted, err)

			if tt.firstWanted != 0 {
				peer := pool.blocks[tt.firstWanted]
				block := pool.peers[peer].blocks[tt.firstWanted]
				assert.Equal(t, block, gotFirst.block,
					"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
					gotFirst.block.Height, tt.firstWanted)
			}

			if tt.secondWanted != 0 {
				peer := pool.blocks[tt.secondWanted]
				block := pool.peers[peer].blocks[tt.secondWanted]
				assert.Equal(t, block, gotSecond.block,
					"BlockPool.FirstTwoBlocksAndPeers() gotSecond = %v, want %v",
					gotSecond.block.Height, tt.secondWanted)
			}
		})
	}
}

func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "both blocks missing",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
		},
		{
			name: "second block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P2", Height: 100}},
				map[int64]tPBlocks{18: {"P2", true}}),
		},
		{
			name: "first block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{18: {"P1", true}, 16: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{18: {"P1", true}}),
		},
		{
			name: "both blocks present",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{},
				map[int64]tPBlocks{}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestProcessedCurrentHeightBlock(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "one peer",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", true}}),
			poolWanted: makeBlockPool(testBcR, 101, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{101: {"P1", true}}),
		},
		{
			name: "multiple peers",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 101,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.ProcessedCurrentHeightBlock()
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestRemovePeerAtCurrentHeight(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "one peer, remove peer for block at H",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", false}, 101: {"P1", true}}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "one peer, remove peer for block at H+1",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "multiple peers, remove peer for block at H",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", false}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
		{
			name: "multiple peers, remove peer for block at H+1",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", false}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

blockchain/v1/reactor.go (new file, 622 lines)
@@ -0,0 +1,622 @@

package v1

import (
	"errors"
	"fmt"
	"reflect"
	"time"

	amino "github.com/tendermint/go-amino"

	"github.com/tendermint/tendermint/behaviour"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)
	trySyncIntervalMS = 10
	trySendIntervalMS = 10

	// ask for best height every 10s
	statusUpdateIntervalSeconds = 10

	// NOTE: keep up to date with bcBlockResponseMessage
	bcBlockResponseMessagePrefixSize   = 4
	bcBlockResponseMessageFieldKeySize = 1
	maxMsgSize                         = types.MaxBlockSizeBytes +
		bcBlockResponseMessagePrefixSize +
		bcBlockResponseMessageFieldKeySize
)

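// Editorial note (not in the original file): the maxMsgSize bound above is a
// worked sum of the amino wire framing for a bcBlockResponseMessage. With the
// sizes defined in this const block it reads, informally:
//
//	maxMsgSize = types.MaxBlockSizeBytes // the block payload itself
//	           + 4                       // bcBlockResponseMessagePrefixSize (length prefix)
//	           + 1                       // bcBlockResponseMessageFieldKeySize (field key byte)
//
// Any wire message longer than this is rejected up front in decodeMsg below.
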
var (
	// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
	// have not been received.
	maxRequestsPerPeer = 20
	// Maximum number of block requests for the reactor, pending or for which blocks have been received.
	maxNumRequests = 64
)

type consensusReactor interface {
	// for when we switch from the blockchain reactor and fast sync to
	// the consensus machine
	SwitchToConsensus(sm.State, int)
}

// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
	p2p.BaseReactor

	initialState sm.State // immutable
	state        sm.State

	blockExec *sm.BlockExecutor
	store     *store.BlockStore

	fastSync bool

	fsm          *BcReactorFSM
	blocksSynced int

	// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
	messagesForFSMCh chan bcReactorMessage

	// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
	// to this channel to be processed in the context of the poolRoutine.
	errorsForFSMCh chan bcReactorMessage

	// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
	// the switch.
	eventsFromFSMCh chan bcFsmMessage

	swReporter *behaviour.SwitchReporter
}

// NewBlockchainReactor returns a new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
	fastSync bool) *BlockchainReactor {

	if state.LastBlockHeight != store.Height() {
		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
			store.Height()))
	}

	const capacity = 1000
	eventsFromFSMCh := make(chan bcFsmMessage, capacity)
	messagesForFSMCh := make(chan bcReactorMessage, capacity)
	errorsForFSMCh := make(chan bcReactorMessage, capacity)

	startHeight := store.Height() + 1
	bcR := &BlockchainReactor{
		initialState:     state,
		state:            state,
		blockExec:        blockExec,
		fastSync:         fastSync,
		store:            store,
		messagesForFSMCh: messagesForFSMCh,
		eventsFromFSMCh:  eventsFromFSMCh,
		errorsForFSMCh:   errorsForFSMCh,
	}
	fsm := NewFSM(startHeight, bcR)
	bcR.fsm = fsm
	bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
	// bcR.swReporter is initialized in OnStart, once the switch has been set on the reactor.

	return bcR
}

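// Editorial sketch (assumed wiring, not part of this file): node start-up
// code is expected to construct and register the reactor roughly like this,
// where state, blockExec, blockStore, logger, sw, and the reactor name
// "BLOCKCHAIN" are assumptions drawn from how Tendermint wires reactors
// into the p2p switch:
//
//	bcReactor := NewBlockchainReactor(state, blockExec, blockStore, fastSync)
//	bcReactor.SetLogger(logger.With("module", "blockchain"))
//	sw.AddReactor("BLOCKCHAIN", bcReactor)
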
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
	event bReactorEvent
	data  bReactorEventData
}

type bFsmEvent uint

const (
	// message type events
	peerErrorEv = iota + 1
	syncFinishedEv
)

type bFsmEventData struct {
	peerID p2p.ID
	err    error
}

// bcFsmMessage is used by the FSM to send messages to the reactor.
type bcFsmMessage struct {
	event bFsmEvent
	data  bFsmEventData
}

// SetLogger implements service.Service by setting the logger on the reactor and the pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
	bcR.BaseService.Logger = l
	bcR.fsm.SetLogger(l)
}

// OnStart implements service.Service.
func (bcR *BlockchainReactor) OnStart() error {
	bcR.swReporter = behaviour.NewSwitcReporter(bcR.BaseReactor.Switch)
	if bcR.fastSync {
		go bcR.poolRoutine()
	}
	return nil
}

// OnStop implements service.Service.
func (bcR *BlockchainReactor) OnStop() {
	_ = bcR.Stop()
}

// GetChannels implements Reactor.
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  BlockchainChannel,
			Priority:            10,
			SendQueueCapacity:   2000,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: maxMsgSize,
		},
	}
}

// AddPeer implements Reactor by sending our state to the peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
	peer.Send(BlockchainChannel, msgBytes)
	// It's OK if the send fails; we will try again later in poolRoutine.

	// The peer is added to the pool once we receive the first
	// bcStatusResponseMessage from it and call pool.updatePeer().
}

// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist, a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcBlockRequestMessage,
	src p2p.Peer) (queued bool) {

	block := bcR.store.LoadBlock(msg.Height)
	if block != nil {
		msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
		return src.TrySend(BlockchainChannel, msgBytes)
	}

	bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)

	msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
	return src.TrySend(BlockchainChannel, msgBytes)
}

func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcStatusRequestMessage, src p2p.Peer) (queued bool) {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
	return src.TrySend(BlockchainChannel, msgBytes)
}

// RemovePeer implements Reactor by removing the peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	msgData := bcReactorMessage{
		event: peerRemoveEv,
		data: bReactorEventData{
			peerID: peer.ID(),
			err:    errSwitchRemovesPeer,
		},
	}
	bcR.errorsForFSMCh <- msgData
}

// Receive implements Reactor by handling the four message types listed below.
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := decodeMsg(msgBytes)
	if err != nil {
		bcR.Logger.Error("error decoding message",
			"src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	if err = msg.ValidateBasic(); err != nil {
		bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)

	switch msg := msg.(type) {
	case *bcBlockRequestMessage:
		if queued := bcR.sendBlockToPeer(msg, src); !queued {
			// Unfortunately not queued since the queue is full.
			bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
		}

	case *bcStatusRequestMessage:
		// Send peer our state.
		if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
			// Unfortunately not queued since the queue is full.
			bcR.Logger.Error("Could not send status message to peer", "src", src)
		}

	case *bcBlockResponseMessage:
		msgForFSM := bcReactorMessage{
			event: blockResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Block.Height,
				block:  msg.Block,
				length: len(msgBytes),
			},
		}
		bcR.Logger.Info("Received", "src", src, "height", msg.Block.Height)
		bcR.messagesForFSMCh <- msgForFSM

	case *bcStatusResponseMessage:
		// Got a peer status. Unverified.
		msgForFSM := bcReactorMessage{
			event: statusResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Height,
				length: len(msgBytes),
			},
		}
		bcR.messagesForFSMCh <- msgForFSM

	default:
		bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
	}
}

// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel.
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {

	processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
	doProcessBlockCh := make(chan struct{}, 1)

	lastHundred := time.Now()
	lastRate := 0.0

ForLoop:
	for {
		select {
		case <-stopProcessing:
			bcR.Logger.Info("finishing block execution")
			break ForLoop
		case <-processReceivedBlockTicker.C: // try to execute blocks
			select {
			case doProcessBlockCh <- struct{}{}:
			default:
			}
		case <-doProcessBlockCh:
			for {
				err := bcR.processBlock()
				if err == errMissingBlock {
					break
				}
				// Notify the FSM of the block processing result.
				msgForFSM := bcReactorMessage{
					event: processedBlockEv,
					data: bReactorEventData{
						err: err,
					},
				}
				_ = bcR.fsm.Handle(&msgForFSM)

				if err != nil {
					break
				}

				bcR.blocksSynced++
				if bcR.blocksSynced%100 == 0 {
					lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
					height, maxPeerHeight := bcR.fsm.Status()
					bcR.Logger.Info("Fast Sync Rate", "height", height,
						"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
					lastHundred = time.Now()
				}
			}
		}
	}
}

// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {

	bcR.fsm.Start()

	sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)

	stopProcessing := make(chan struct{}, 1)
	go bcR.processBlocksRoutine(stopProcessing)

ForLoop:
	for {
		select {

		case <-sendBlockRequestTicker.C:
			if !bcR.fsm.NeedsBlocks() {
				continue
			}
			_ = bcR.fsm.Handle(&bcReactorMessage{
				event: makeRequestsEv,
				data: bReactorEventData{
					maxNumRequests: maxNumRequests}})

		case <-statusUpdateTicker.C:
			// Ask for status updates.
			go bcR.sendStatusRequest()

		case msg := <-bcR.messagesForFSMCh:
			// Sent from the Receive() routine when status (statusResponseEv) and
			// block (blockResponseEv) response events are received.
			_ = bcR.fsm.Handle(&msg)

		case msg := <-bcR.errorsForFSMCh:
			// Sent from the switch.RemovePeer() routine (peerRemoveEv) and from
			// the FSM state timer expiry routine (stateTimeoutEv).
			_ = bcR.fsm.Handle(&msg)

		case msg := <-bcR.eventsFromFSMCh:
			switch msg.event {
			case syncFinishedEv:
				// Sent from the FSM when it enters the finished state.
				stopProcessing <- struct{}{}
				break ForLoop
			case peerErrorEv:
				// Sent from the FSM when it detects a peer error.
				bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
				if msg.data.err == errNoPeerResponse {
					// Sent from the peer timeout handler routine.
					_ = bcR.fsm.Handle(&bcReactorMessage{
						event: peerRemoveEv,
						data: bReactorEventData{
							peerID: msg.data.peerID,
							err:    msg.data.err,
						},
					})
				}
				// For slow peers, or errors due to blocks received from the wrong peer,
				// the FSM has already removed the peers.
			default:
				bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
			}

		case <-bcR.Quit():
			break ForLoop
		}
	}
}

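// Editorial summary of the concurrency model above: Receive() runs on the
// p2p connection goroutines and only forwards events over messagesForFSMCh;
// poolRoutine() is the single goroutine that feeds those events (plus timer
// ticks and switch errors) into the FSM; processBlocksRoutine() executes
// verified blocks and reports results back as processedBlockEv messages.
// Reactor-side access to the FSM is thus funneled through channels rather
// than shared state.
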
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
	peer := bcR.Switch.Peers().Get(peerID)
	if peer != nil {
		_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
	}
}

func (bcR *BlockchainReactor) processBlock() error {

	first, second, err := bcR.fsm.FirstTwoBlocks()
	if err != nil {
		// We need both to sync the first block.
		return err
	}

	chainID := bcR.initialState.ChainID

	firstParts := first.MakePartSet(types.BlockPartSizeBytes)
	firstPartsHeader := firstParts.Header()
	firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
	// Finally, verify the first block using the second's commit.
	// NOTE: we can probably make this more efficient, but note that calling
	// first.Hash() doesn't verify the tx contents, so MakePartSet() is
	// currently necessary.
	err = bcR.state.Validators.VerifyCommit(chainID, firstID, first.Height, second.LastCommit)
	if err != nil {
		bcR.Logger.Error("error during commit verification", "err", err,
			"first", first.Height, "second", second.Height)
		return errBlockVerificationFailure
	}

	bcR.store.SaveBlock(first, firstParts, second.LastCommit)

	bcR.state, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
	if err != nil {
		panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
	}

	return nil
}

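// Editorial note on the verification step above: in Tendermint, the commit
// (the set of precommit votes) for block H is carried inside block H+1 as
// its LastCommit. That is why processBlock needs both the first and second
// blocks: the second block's LastCommit is the proof that the validators
// committed the first one.
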
// Implements bcRNotifier.
// sendStatusRequest broadcasts the `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
	bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
}

// Implements bcRNotifier.
// sendBlockRequest sends a `BlockRequest` for the given height.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	peer := bcR.Switch.Peers().Get(peerID)
	if peer == nil {
		return errNilPeerForBlockRequest
	}

	msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{height})
	queued := peer.TrySend(BlockchainChannel, msgBytes)
	if !queued {
		return errSendQueueFull
	}
	return nil
}

// Implements bcRNotifier.
func (bcR *BlockchainReactor) switchToConsensus() {
	conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
	if ok {
		conR.SwitchToConsensus(bcR.state, bcR.blocksSynced)
		bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
	}
	// A missing consensus reactor should only happen during testing.
}

// Implements bcRNotifier.
// sendPeerError is called by the FSM and the pool:
//   - the pool calls it when it detects a slow peer or when a peer times out
//   - the FSM calls it when:
//   - adding a block (addBlock) fails
//   - reactor processing of a block reports failure, in which case the FSM sends back the peers of the first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
	bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
	msgData := bcFsmMessage{
		event: peerErrorEv,
		data: bFsmEventData{
			peerID: peerID,
			err:    err,
		},
	}
	bcR.eventsFromFSMCh <- msgData
}

// Implements bcRNotifier.
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	if timer == nil {
		panic("nil timer pointer parameter")
	}
	if *timer == nil {
		*timer = time.AfterFunc(timeout, func() {
			msg := bcReactorMessage{
				event: stateTimeoutEv,
				data: bReactorEventData{
					stateName: name,
				},
			}
			bcR.errorsForFSMCh <- msg
		})
	} else {
		(*timer).Reset(timeout)
	}
}

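// Editorial sketch of the **time.Timer pattern used by resetStateTimer
// above: the FSM owns the timer field while the reactor allocates and arms
// it, so a pointer to the pointer lets the first call create the timer via
// time.AfterFunc and later calls merely Reset it:
//
//	var t *time.Timer
//	bcR.resetStateTimer("waitForBlock", &t, 10*time.Second) // allocates and arms
//	bcR.resetStateTimer("waitForBlock", &t, 10*time.Second) // resets the same timer
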
//-----------------------------------------------------------------------------
// Messages

// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface {
	ValidateBasic() error
}

// RegisterBlockchainMessages registers the fast sync messages for amino encoding.
func RegisterBlockchainMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
	cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
	cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
	cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
	cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
}

func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
	if len(bz) > maxMsgSize {
		return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}

//-------------------------------------

type bcBlockRequestMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcBlockRequestMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcBlockRequestMessage) String() string {
	return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
}

type bcNoBlockResponseMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcNoBlockResponseMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcNoBlockResponseMessage) String() string {
	return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height)
}

//-------------------------------------

type bcBlockResponseMessage struct {
	Block *types.Block
}

// ValidateBasic performs basic validation.
func (m *bcBlockResponseMessage) ValidateBasic() error {
	return m.Block.ValidateBasic()
}

func (m *bcBlockResponseMessage) String() string {
	return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
}

//-------------------------------------

type bcStatusRequestMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcStatusRequestMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcStatusRequestMessage) String() string {
	return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
}

//-------------------------------------

type bcStatusResponseMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcStatusResponseMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcStatusResponseMessage) String() string {
	return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
}

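// Editorial round-trip sketch for the wire format above (illustrative only;
// cdc is the package-level amino codec on which RegisterBlockchainMessages
// is assumed to have been called):
//
//	bz := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{Height: 5})
//	msg, err := decodeMsg(bz) // yields a BlockchainMessage
//	if err == nil {
//		err = msg.ValidateBasic()
//	}
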
blockchain/v1/reactor_fsm.go (new file, 452 lines)
@@ -0,0 +1,452 @@

package v1

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// Blockchain Reactor State
type bcReactorFSMState struct {
	name string

	// called when transitioning out of the current state
	handle func(*BcReactorFSM, bReactorEvent, bReactorEventData) (next *bcReactorFSMState, err error)
	// called when entering the state
	enter func(fsm *BcReactorFSM)

	// timeout to ensure the FSM is not stuck in a state forever;
	// the timer is owned and run by the FSM instance
	timeout time.Duration
}

func (s *bcReactorFSMState) String() string {
	return s.name
}

// BcReactorFSM is the data structure for the Blockchain Reactor State Machine.
type BcReactorFSM struct {
	logger log.Logger
	mtx    sync.Mutex

	startTime time.Time

	state      *bcReactorFSMState
	stateTimer *time.Timer
	pool       *BlockPool

	// interface used to call the Blockchain reactor to send StatusRequest and BlockRequest, report errors, etc.
	toBcR bcReactor
}

// NewFSM creates a new reactor FSM.
func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM {
	return &BcReactorFSM{
		state:     unknown,
		startTime: time.Now(),
		pool:      NewBlockPool(height, toBcR),
		toBcR:     toBcR,
	}
}

// bReactorEventData is part of the message sent by the reactor to the FSM and is used by the state handlers.
type bReactorEventData struct {
	peerID         p2p.ID
	err            error        // for peer error: timeout, slow; for processed block event if an error occurred
	height         int64        // for status response; for processed block event
	block          *types.Block // for block response
	stateName      string       // for state timeout events
	length         int          // for block response event, length of received block, used to detect slow peers
	maxNumRequests int          // for request needed event, maximum number of pending requests
}

// Blockchain Reactor Events (the input to the state machine)
type bReactorEvent uint

const (
	// message type events
	startFSMEv = iota + 1
	statusResponseEv
	blockResponseEv
	processedBlockEv
	makeRequestsEv
	stopFSMEv

	// other events
	peerRemoveEv = iota + 256
	stateTimeoutEv
)

func (msg *bcReactorMessage) String() string {
	var dataStr string

	switch msg.event {
	case startFSMEv:
		dataStr = ""
	case statusResponseEv:
		dataStr = fmt.Sprintf("peer=%v height=%v", msg.data.peerID, msg.data.height)
	case blockResponseEv:
		dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v",
			msg.data.peerID, msg.data.block.Height, msg.data.length)
	case processedBlockEv:
		dataStr = fmt.Sprintf("error=%v", msg.data.err)
	case makeRequestsEv:
		dataStr = ""
	case stopFSMEv:
		dataStr = ""
	case peerRemoveEv:
		dataStr = fmt.Sprintf("peer: %v is being removed by the switch", msg.data.peerID)
	case stateTimeoutEv:
		dataStr = fmt.Sprintf("state=%v", msg.data.stateName)
	default:
		dataStr = "cannot interpret message data"
	}

	return fmt.Sprintf("%v: %v", msg.event, dataStr)
}

func (ev bReactorEvent) String() string {
	switch ev {
	case startFSMEv:
		return "startFSMEv"
	case statusResponseEv:
		return "statusResponseEv"
	case blockResponseEv:
		return "blockResponseEv"
	case processedBlockEv:
		return "processedBlockEv"
	case makeRequestsEv:
		return "makeRequestsEv"
	case stopFSMEv:
		return "stopFSMEv"
	case peerRemoveEv:
		return "peerRemoveEv"
	case stateTimeoutEv:
		return "stateTimeoutEv"
	default:
		return "event unknown"
	}
}

// states
var (
	unknown      *bcReactorFSMState
	waitForPeer  *bcReactorFSMState
	waitForBlock *bcReactorFSMState
	finished     *bcReactorFSMState
)

// timeouts for state timers
const (
	waitForPeerTimeout                 = 3 * time.Second
	waitForBlockAtCurrentHeightTimeout = 10 * time.Second
)

// errors
var (
	// internal to the package
	errNoErrorFinished        = errors.New("fast sync is finished")
	errInvalidEvent           = errors.New("invalid event in current state")
	errMissingBlock           = errors.New("missing blocks")
	errNilPeerForBlockRequest = errors.New("peer for block request does not exist in the switch")
	errSendQueueFull          = errors.New("block request not made, send-queue is full")
	errPeerTooShort           = errors.New("peer height too low, old peer removed/new peer not added")
	errSwitchRemovesPeer      = errors.New("switch is removing peer")
	errTimeoutEventWrongState = errors.New("timeout event for a state different than the current one")
	errNoTallerPeer           = errors.New("fast sync timed out on waiting for a peer taller than this node")

	// reported eventually to the switch
	errPeerLowersItsHeight             = errors.New("fast sync peer reports a height lower than previous")            // handle return
	errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights") // handle return
	errNoPeerResponse                  = errors.New("fast sync timed out on peer block response")
	errBadDataFromPeer                 = errors.New("fast sync received block from wrong peer or block is bad")
	errDuplicateBlock                  = errors.New("fast sync received duplicate block from peer")
	errBlockVerificationFailure        = errors.New("fast sync block verification failure")
	errSlowPeer                        = errors.New("fast sync peer is not sending us data fast enough")
)

func init() {
	unknown = &bcReactorFSMState{
		name: "unknown",
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {
			case startFSMEv:
				// Broadcast Status message. Currently doesn't return a non-nil error.
				fsm.toBcR.sendStatusRequest()
				return waitForPeer, nil

			case stopFSMEv:
				return finished, errNoErrorFinished

			default:
				return unknown, errInvalidEvent
			}
		},
	}

	waitForPeer = &bcReactorFSMState{
		name:    "waitForPeer",
		timeout: waitForPeerTimeout,
		enter: func(fsm *BcReactorFSM) {
			// Start (or restart) the state timer; it is stopped when leaving the state.
			fsm.resetStateTimer()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {
			case stateTimeoutEv:
				if data.stateName != "waitForPeer" {
					fsm.logger.Error("received a state timeout event for different state",
						"state", data.stateName)
					return waitForPeer, errTimeoutEventWrongState
				}
				// There was no statusResponse received from any peer.
				// Should we send the status request again?
				return finished, errNoTallerPeer

			case statusResponseEv:
				if err := fsm.pool.UpdatePeer(data.peerID, data.height); err != nil {
					if fsm.pool.NumPeers() == 0 {
						return waitForPeer, err
					}
				}
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return waitForBlock, nil

			case stopFSMEv:
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return finished, errNoErrorFinished

			default:
				return waitForPeer, errInvalidEvent
			}
		},
	}

	waitForBlock = &bcReactorFSMState{
		name:    "waitForBlock",
		timeout: waitForBlockAtCurrentHeightTimeout,
		enter: func(fsm *BcReactorFSM) {
			// Start (or restart) the state timer; it is stopped when leaving the state.
			fsm.resetStateTimer()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {

			case statusResponseEv:
				err := fsm.pool.UpdatePeer(data.peerID, data.height)
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, err
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, err
				}
				return waitForBlock, err

			case blockResponseEv:
				fsm.logger.Debug("blockResponseEv", "H", data.block.Height)
				err := fsm.pool.AddBlock(data.peerID, data.block, data.length)
				if err != nil {
					// A block was received that was unsolicited, from an unexpected peer, or that we already have.
					// Ignore the block, remove the peer and send an error to the switch.
					fsm.pool.RemovePeer(data.peerID, err)
					fsm.toBcR.sendPeerError(err, data.peerID)
				}
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, err
				}
				return waitForBlock, err

			case processedBlockEv:
				if data.err != nil {
					first, second, _ := fsm.pool.FirstTwoBlocksAndPeers()
					fsm.logger.Error("error processing block", "err", data.err,
						"first", first.block.Height, "second", second.block.Height)
					fsm.logger.Error("send peer error for", "peer", first.peer.ID)
					fsm.toBcR.sendPeerError(data.err, first.peer.ID)
					fsm.logger.Error("send peer error for", "peer", second.peer.ID)
					fsm.toBcR.sendPeerError(data.err, second.peer.ID)
					// Remove the first two blocks. This will also remove the peers.
					fsm.pool.InvalidateFirstTwoBlocks(data.err)
				} else {
					fsm.pool.ProcessedCurrentHeightBlock()
					// Since we advanced one block, reset the state timer.
					fsm.resetStateTimer()
				}

				// Both cases above may result in achieving the maximum height.
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}

				return waitForBlock, data.err

			case peerRemoveEv:
				// This event is sent by the switch to remove disconnected and errored peers.
				fsm.pool.RemovePeer(data.peerID, data.err)
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, nil
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}
				return waitForBlock, nil

			case makeRequestsEv:
				fsm.makeNextRequests(data.maxNumRequests)
				return waitForBlock, nil

			case stateTimeoutEv:
				if data.stateName != "waitForBlock" {
					fsm.logger.Error("received a state timeout event for different state",
						"state", data.stateName)
					return waitForBlock, errTimeoutEventWrongState
				}
				// We haven't received the block at the current height or height+1. Remove the unresponsive peer.
				fsm.pool.RemovePeerAtCurrentHeights(errNoPeerResponseForCurrentHeights)
				fsm.resetStateTimer()
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, errNoPeerResponseForCurrentHeights
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}
				return waitForBlock, errNoPeerResponseForCurrentHeights

			case stopFSMEv:
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return finished, errNoErrorFinished

			default:
				return waitForBlock, errInvalidEvent
			}
		},
	}

	finished = &bcReactorFSMState{
		name: "finished",
		enter: func(fsm *BcReactorFSM) {
			fsm.logger.Info("Time to switch to consensus reactor!", "height", fsm.pool.Height)
			fsm.toBcR.switchToConsensus()
			fsm.cleanup()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			return finished, nil
		},
	}
}

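// Editorial transition summary, informally derived from the handlers above:
//
//	unknown      --startFSMEv-------> waitForPeer
//	waitForPeer  --statusResponseEv-> waitForBlock
//	waitForPeer  --stateTimeoutEv---> finished (errNoTallerPeer)
//	waitForBlock --status/block/processed/peerRemove events-->
//	                 waitForBlock, or waitForPeer once the last peer is gone,
//	                 or finished once the max peer height is reached
//	any state    --stopFSMEv--------> finished
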
// bcReactor is the interface used by the FSM for sending Block and Status
// requests and for reporting peer errors and state timeouts.
// It is implemented by the BlockchainReactor and by tests.
type bcReactor interface {
	sendStatusRequest()
	sendBlockRequest(peerID p2p.ID, height int64) error
	sendPeerError(err error, peerID p2p.ID)
	resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
	switchToConsensus()
}

// SetLogger sets the FSM logger.
func (fsm *BcReactorFSM) SetLogger(l log.Logger) {
	fsm.logger = l
	fsm.pool.SetLogger(l)
}

// Start starts the FSM.
func (fsm *BcReactorFSM) Start() {
	_ = fsm.Handle(&bcReactorMessage{event: startFSMEv})
}

// Handle processes messages and events sent to the FSM.
func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	fsm.logger.Debug("FSM received", "event", msg, "state", fsm.state)

	if fsm.state == nil {
		fsm.state = unknown
	}
	next, err := fsm.state.handle(fsm, msg.event, msg.data)
	if err != nil {
		fsm.logger.Error("FSM event handler returned", "err", err,
			"state", fsm.state, "event", msg.event)
	}

	oldState := fsm.state.name
	fsm.transition(next)
	if oldState != fsm.state.name {
		fsm.logger.Info("FSM changed state", "new_state", fsm.state)
	}
	return err
}

func (fsm *BcReactorFSM) transition(next *bcReactorFSMState) {
	if next == nil {
		return
	}
	if fsm.state != next {
		fsm.state = next
		if next.enter != nil {
			next.enter(fsm)
		}
	}
}

// Called when entering an FSM state in order to detect lack of progress in the state machine.
// Note the use of the 'bcR' interface to facilitate testing without the timer expiring.
func (fsm *BcReactorFSM) resetStateTimer() {
	fsm.toBcR.resetStateTimer(fsm.state.name, &fsm.stateTimer, fsm.state.timeout)
}

func (fsm *BcReactorFSM) isCaughtUp() bool {
	return fsm.state == finished
}

func (fsm *BcReactorFSM) makeNextRequests(maxNumRequests int) {
	fsm.pool.MakeNextRequests(maxNumRequests)
}

func (fsm *BcReactorFSM) cleanup() {
	fsm.pool.Cleanup()
}

// NeedsBlocks checks if more block requests are required.
func (fsm *BcReactorFSM) NeedsBlocks() bool {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	return fsm.state.name == "waitForBlock" && fsm.pool.NeedsBlocks()
}

// FirstTwoBlocks returns the two blocks at pool height and height+1.
func (fsm *BcReactorFSM) FirstTwoBlocks() (first, second *types.Block, err error) {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	firstBP, secondBP, err := fsm.pool.FirstTwoBlocksAndPeers()
	if err == nil {
		first = firstBP.block
		second = secondBP.block
	}
	return
}

// Status returns the pool's height and the maximum peer height.
func (fsm *BcReactorFSM) Status() (height, maxPeerHeight int64) {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	return fsm.pool.Height, fsm.pool.MaxPeerHeight
}

blockchain/v1/reactor_fsm_test.go (new file, 943 lines)
@@ -0,0 +1,943 @@

package v1

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/libs/log"
	tmmath "github.com/tendermint/tendermint/libs/math"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

type lastBlockRequestT struct {
	peerID p2p.ID
	height int64
}

type lastPeerErrorT struct {
	peerID p2p.ID
	err    error
}

// reactor for FSM testing
type testReactor struct {
	logger            log.Logger
	fsm               *BcReactorFSM
	numStatusRequests int
	numBlockRequests  int
	lastBlockRequest  lastBlockRequestT
	lastPeerError     lastPeerErrorT
	stateTimerStarts  map[string]int
}

func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error {
	return fsm.Handle(&bcReactorMessage{event: ev, data: data})
}

type fsmStepTestValues struct {
	currentState string
	event        bReactorEvent
	data         bReactorEventData

	wantErr           error
	wantState         string
	wantStatusReqSent bool
	wantReqIncreased  bool
	wantNewBlocks     []int64
	wantRemovedPeers  []p2p.ID
}

// ---------------------------------------------------------------------------
// helper test functions for different FSM events, states and expected behavior

func sStopFSMEv(current, expected string) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        stopFSMEv,
		wantState:    expected,
		wantErr:      errNoErrorFinished}
}

func sUnknownFSMEv(current string) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        1234,
		wantState:    current,
		wantErr:      errInvalidEvent}
}

func sStartFSMEv() fsmStepTestValues {
	return fsmStepTestValues{
		currentState:      "unknown",
		event:             startFSMEv,
		wantState:         "waitForPeer",
		wantStatusReqSent: true}
}

func sStateTimeoutEv(current, expected string, timedoutState string, wantErr error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        stateTimeoutEv,
		data: bReactorEventData{
			stateName: timedoutState,
		},
		wantState: expected,
		wantErr:   wantErr,
	}
}

func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        processedBlockEv,
		data: bReactorEventData{
			err: reactorError,
		},
		wantState: expected,
		wantErr:   reactorError,
	}
}

func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        statusResponseEv,
		data:         bReactorEventData{peerID: peerID, height: height},
		wantState:    expected,
		wantErr:      err}
}

func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues {
	return fsmStepTestValues{
		currentState:     current,
		event:            makeRequestsEv,
		data:             bReactorEventData{maxNumRequests: maxPendingRequests},
		wantState:        expected,
		wantReqIncreased: true,
	}
}

func sMakeRequestsEvErrored(current, expected string,
	maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues {
	return fsmStepTestValues{
		currentState:     current,
		event:            makeRequestsEv,
		data:             bReactorEventData{maxNumRequests: maxPendingRequests},
		wantState:        expected,
		wantErr:          err,
		wantRemovedPeers: peersRemoved,
		wantReqIncreased: true,
	}
}

func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues {
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
	return fsmStepTestValues{
		currentState: current,
		event:        blockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
			block:  types.MakeBlock(height, txs, nil, nil),
			length: 100},
		wantState:     expected,
		wantNewBlocks: append(prevBlocks, height),
	}
}

func sBlockRespEvErrored(current, expected string,
	peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues {
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

	return fsmStepTestValues{
		currentState: current,
		event:        blockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
			block:  types.MakeBlock(height, txs, nil, nil),
			length: 100},
		wantState:        expected,
		wantErr:          wantErr,
		wantRemovedPeers: peersRemoved,
		wantNewBlocks:    prevBlocks,
	}
}

func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        peerRemoveEv,
		data: bReactorEventData{
			peerID: peerID,
			err:    err,
		},
		wantState:        expected,
		wantRemovedPeers: peersRemoved,
	}
}

// --------------------------------------------

func newTestReactor(height int64) *testReactor {
	testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)}
	testBcR.fsm = NewFSM(height, testBcR)
	testBcR.fsm.SetLogger(testBcR.logger)
	return testBcR
}

func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) {
	// There is currently no good way to know to which peer a block request was sent.
	// So in some cases where it does not matter, before we simulate a block response
	// we cheat and look up which peer it is expected from.
	if step.event == blockResponseEv {
		height := step.data.height
		peerID, ok := testBcR.fsm.pool.blocks[height]
		if ok {
			step.data.peerID = peerID
		}
	}
}

type testFields struct {
	name               string
	startingHeight     int64
	maxRequestsPerPeer int
	maxPendingRequests int
	steps              []fsmStepTestValues
}

func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) {
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// Create test reactor
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				step := step
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				var heightBefore int64
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightBefore = testBcR.fsm.pool.Height
				}
				oldNumStatusRequests := testBcR.numStatusRequests
				oldNumBlockRequests := testBcR.numBlockRequests
				if matchRespToReq {
					fixBlockResponseEvStep(&step, testBcR)
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				if step.wantReqIncreased {
					assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests)
				} else {
					assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests)
				}

				for _, height := range step.wantNewBlocks {
					_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height)
					assert.Nil(t, err)
				}
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightAfter := testBcR.fsm.pool.Height
					assert.Equal(t, heightBefore, heightAfter)
					firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
					secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
					assert.NotNil(t, err1)
					assert.NotNil(t, err2)
					assert.Nil(t, firstAfter)
					assert.Nil(t, secondAfter)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)

				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}
		})
	}
}

func TestFSMBasic(t *testing.T) {
	tests := []testFields{
		{
			name:               "one block, one peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "multi block, multi peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}),

				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMBlockVerificationFailure(t *testing.T) {
	tests := []testFields{
		{
			name:               "block verification failure - TS2 variant",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get blocks 1-3 from it
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// process block failure, should remove P1 and all blocks
				sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure),

				// get blocks 1-3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),

				// finish after processing blocks 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBadBlockFromPeer(t *testing.T) {
	tests := []testFields{
		{
			name:               "block we haven't asked for",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and ask for blocks 1-3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// blockResponseEv for height 100 should cause an error
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 100, []int64{}, errMissingBlock, []p2p.ID{}),
			},
		},
		{
			name:               "block we already have",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and get block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),

				// Get block 1 again. Since the peer is removed together with block 1,
				// the blocks present in the pool should be {}.
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "block from unknown peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),

				// get block 1 from unknown peer P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "block from wrong peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, make requests for blocks 1-3 to P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// receive block 1 from P2
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
	tests := []testFields{
		{
			name:               "block at current height undelivered - TS5",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, get blocks 1 and 2, process block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// timeout on block 3, P1 should be removed
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),

				// make requests and finish by receiving blocks 2 and 3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, request blocks 1-3 from P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2 (tallest)
				sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// receive blocks 1-3 from P1
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// process blocks at heights 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// timeout on block at height 4
				sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMPeerRelatedEvents(t *testing.T) {
	tests := []testFields{
		{
			name:           "peer remove event with no blocks",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, P2, P3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),

				// switch removes P2
				sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:           "only peer removed while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),

				// switch removes P1
				sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4",
			startingHeight:     100,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and make requests
				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

				// get blocks 100 and 101 from P1 and process block at height 100
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// switch removes P2 (the highest peer); the node reaches maxPeerHeight and should be finished
				sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
			startingHeight:     100,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and make requests
				sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

				// get blocks 100 and 101 from P1
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),

				// process block at height 100
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

				// P2 becomes short
				sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
			},
		},
		{
			name:           "new short peer while in waitForPeer state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
			},
		},
		{
			name:           "new short peer while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
			},
		},
		{
			name:           "only peer updated with low height while in waitForBlock state",
			startingHeight: 100,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
				sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
			},
		},
		{
			name:               "peer does not exist in the switch",
			startingHeight:     9999999,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),
				// send request for block 9999999
				// Note: for this block request the "switch missing the peer" error is simulated;
				// see the implementation of sendBlockRequest() in this file.
				sMakeRequestsEvErrored("waitForBlock", "waitForBlock",
					maxNumRequests, nil, []p2p.ID{"P1"}),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMStopFSM(t *testing.T) {
	tests := []testFields{
		{
			name: "stopFSMEv in unknown",
			steps: []fsmStepTestValues{
				sStopFSMEv("unknown", "finished"),
			},
		},
		{
			name:           "stopFSMEv in waitForPeer",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStopFSMEv("waitForPeer", "finished"),
			},
		},
		{
			name:           "stopFSMEv in waitForBlock",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sStopFSMEv("waitForBlock", "finished"),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMUnknownElements(t *testing.T) {
	tests := []testFields{
		{
			name: "unknown event for state unknown",
			steps: []fsmStepTestValues{
				sUnknownFSMEv("unknown"),
			},
		},
		{
			name: "unknown event for state waitForPeer",
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sUnknownFSMEv("waitForPeer"),
			},
		},
		{
			name:           "unknown event for state waitForBlock",
			startingHeight: 1,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sUnknownFSMEv("waitForBlock"),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMPeerStateTimeoutEvent(t *testing.T) {
	tests := []testFields{
		{
			name:               "timeout event for state waitForPeer while in state waitForPeer - TS1",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer),
			},
		},
		{
			name:               "timeout event for state waitForPeer while in a state != waitForPeer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState),
			},
		},
		{
			name:               "timeout event for state waitForBlock while in state waitForBlock",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights),
			},
		},
		{
			name:               "timeout event for state waitForBlock while in a state != waitForBlock",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState),
			},
		},
		{
			name:               "timeout event for state waitForBlock with multiple peers",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
				sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool,
	maxRequestsPerPeer int, maxPendingRequests int) testFields {

	// Generate numPeers peers with heights that are either random or equal to numBlocks,
	// depending on the randomPeerHeights flag.
	peerHeights := make([]int64, numPeers)
	for i := 0; i < numPeers; i++ {
		if i == 0 {
			peerHeights[0] = numBlocks
			continue
		}
		if randomPeerHeights {
			peerHeights[i] = int64(tmmath.MaxInt(tmrand.Intn(int(numBlocks)), int(startingHeight)+1))
		} else {
			peerHeights[i] = numBlocks
		}
	}

	// Approximate the slice capacity to save time on appends.
	testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))

	testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxNumRequests",
		numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)

	// Add the startFSMEv step.
	testSteps = append(testSteps, sStartFSMEv())

	// For each peer, add a statusResponseEv step.
	for i := 0; i < numPeers; i++ {
		peerName := fmt.Sprintf("P%d", i)
		if i == 0 {
			testSteps = append(
				testSteps,
				sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
		} else {
			testSteps = append(testSteps,
				sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
		}
	}

	height := startingHeight
	numBlocksReceived := 0
	prevBlocks := make([]int64, 0, maxPendingRequests)

forLoop:
	for i := 0; i < int(numBlocks); i++ {

		// Add the makeRequestEv step periodically.
		if i%maxRequestsPerPeer == 0 {
			testSteps = append(
				testSteps,
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
			)
		}

		// Add the blockRespEv step.
		testSteps = append(
			testSteps,
			sBlockRespEv("waitForBlock", "waitForBlock",
				"P0", height, prevBlocks))
		prevBlocks = append(prevBlocks, height)
		height++
		numBlocksReceived++

		// Add the processedBlockEv step periodically.
		if numBlocksReceived >= maxRequestsPerPeer || height >= numBlocks {
			for j := int(height) - numBlocksReceived; j < int(height); j++ {
				if j >= int(numBlocks) {
					// This is the last block to be processed; we should be in the "finished" state.
					testSteps = append(
						testSteps,
						sProcessedBlockEv("waitForBlock", "finished", nil))
					break forLoop
				}
				testSteps = append(
					testSteps,
					sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
			}
			numBlocksReceived = 0
			prevBlocks = make([]int64, 0, maxPendingRequests)
		}
	}

	return testFields{
		name:               testName,
		startingHeight:     startingHeight,
		maxRequestsPerPeer: maxRequestsPerPeer,
		maxPendingRequests: maxPendingRequests,
		steps:              testSteps,
	}
}

const (
	maxStartingHeightTest       = 100
	maxRequestsPerPeerTest      = 20
	maxTotalPendingRequestsTest = 600
	maxNumPeersTest             = 1000
	maxNumBlocksInChainTest     = 10000 // should be smaller than 9999999
)

func makeCorrectTransitionSequenceWithRandomParameters() testFields {
	// Generate a starting height for fast sync.
	startingHeight := int64(tmrand.Intn(maxStartingHeightTest) + 1)

	// Generate the number of requests per peer.
	maxRequestsPerPeer := tmrand.Intn(maxRequestsPerPeerTest) + 1

	// Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
	maxPendingRequests := tmrand.Intn(maxTotalPendingRequestsTest-maxRequestsPerPeer) + maxRequestsPerPeer

	// Generate the number of blocks to be synced.
	numBlocks := int64(tmrand.Intn(maxNumBlocksInChainTest)) + startingHeight

	// Generate a number of peers.
	numPeers := tmrand.Intn(maxNumPeersTest) + 1

	return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests)
}

func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool {
	if step.event == processedBlockEv {
		_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
		if err == errMissingBlock {
			return false
		}
		_, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
		if err == errMissingBlock {
			return false
		}
	}
	return true
}

func TestFSMCorrectTransitionSequences(t *testing.T) {

	tests := []testFields{
		makeCorrectTransitionSequence(1, 100, 10, true, 10, 40),
		makeCorrectTransitionSequenceWithRandomParameters(),
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// Create the test reactor.
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				step := step
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				oldNumStatusRequests := testBcR.numStatusRequests
				fixBlockResponseEvStep(&step, testBcR)
				if !shouldApplyProcessedBlockEvStep(&step, testBcR) {
					continue
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)
				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}

		})
	}
}

// ----------------------------------------
// testReactor implements the bcRNotifier

func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) {
	testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err)
	testR.lastPeerError.peerID = peerID
	testR.lastPeerError.err = err
}

func (testR *testReactor) sendStatusRequest() {
	testR.logger.Info("Reactor received sendStatusRequest call from FSM")
	testR.numStatusRequests++
}

func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height)
	testR.numBlockRequests++
	testR.lastBlockRequest.peerID = peerID
	testR.lastBlockRequest.height = height
	if height == 9999999 {
		// simulate a switch that does not have the peer
		return errNilPeerForBlockRequest
	}
	return nil
}

func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout)
	if _, ok := testR.stateTimerStarts[name]; !ok {
		testR.stateTimerStarts[name] = 1
	} else {
		testR.stateTimerStarts[name]++
	}
}

func (testR *testReactor) switchToConsensus() {
}

// ----------------------------------------
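For reference, the five methods above imply a notifier interface along these lines. This is a reconstruction inferred from the method set shown here, not the actual definition; the real bcRNotifier lives in the v1 reactor sources, which are outside this hunk:

	// bcRNotifier (reconstructed sketch; the authoritative definition is in the
	// v1 reactor code, not in this diff).
	type bcRNotifier interface {
		sendPeerError(err error, peerID p2p.ID)
		sendStatusRequest()
		sendBlockRequest(peerID p2p.ID, height int64) error
		resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
		switchToConsensus()
	}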
blockchain/v1/reactor_test.go (new file, 429 lines)
@@ -0,0 +1,429 @@
package v1

import (
	"fmt"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mock"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
	dbm "github.com/tendermint/tm-db"
)

var config *cfg.Config

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)
	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}
		privValidators[i] = privVal
	}
	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     config.ChainID(),
		Validators:  validators,
	}, privValidators
}

func makeVote(
	header *types.Header,
	blockID types.BlockID,
	valset *types.ValidatorSet,
	privVal types.PrivValidator) *types.Vote {
	addr := privVal.GetPubKey().Address()
	idx, _ := valset.GetByAddress(addr)
	vote := &types.Vote{
		ValidatorAddress: addr,
		ValidatorIndex:   idx,
		Height:           header.Height,
		Round:            1,
		Timestamp:        tmtime.Now(),
		Type:             types.PrecommitType,
		BlockID:          blockID,
	}

	_ = privVal.SignVote(header.ChainID, vote)

	return vote
}

type BlockchainReactorPair struct {
	bcR  *BlockchainReactor
	conR *consensusReactorTest
}

func newBlockchainReactor(
	logger log.Logger,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64) *BlockchainReactor {
	if len(privVals) != 1 {
		panic("only support one validator")
	}

	app := &testApp{}
	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc)
	err := proxyApp.Start()
	if err != nil {
		panic(errors.Wrap(err, "error starting app"))
	}

	blockDB := dbm.NewMemDB()
	stateDB := dbm.NewMemDB()
	blockStore := store.NewBlockStore(blockDB)

	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
	if err != nil {
		panic(errors.Wrap(err, "error constructing state from genesis file"))
	}

	// Make the BlockchainReactor itself.
	// NOTE we have to create and commit the blocks first because
	// pool.height is determined from the store.
	fastSync := true
	db := dbm.NewMemDB()
	blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(),
		mock.Mempool{}, sm.MockEvidencePool{})
	sm.SaveState(db, state)

	// let's add some blocks in
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(blockHeight-1, 1, types.BlockID{}, nil)
		if blockHeight > 1 {
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)

			vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
			lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
		}

		thisBlock := makeBlock(blockHeight, state, lastCommit)

		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()}

		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		if err != nil {
			panic(errors.Wrap(err, "error applying block"))
		}

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}

	bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	return bcReactor
}

func newBlockchainReactorPair(
	logger log.Logger,
	genDoc *types.GenesisDoc,
	privVals []types.PrivValidator,
	maxBlockHeight int64) BlockchainReactorPair {

	consensusReactor := &consensusReactorTest{}
	consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor)

	return BlockchainReactorPair{
		newBlockchainReactor(logger, genDoc, privVals, maxBlockHeight),
		consensusReactor}
}

type consensusReactorTest struct {
	p2p.BaseReactor     // BaseService + p2p.Switch
	switchedToConsensus bool
	mtx                 sync.Mutex
}

func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced int) {
	conR.mtx.Lock()
	defer conR.mtx.Unlock()
	conR.switchedToConsensus = true
}

func TestFastSyncNoBlockResponse(t *testing.T) {

	config = cfg.ResetTestRoot("blockchain_new_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	maxBlockHeight := int64(65)

	reactorPairs := make([]BlockchainReactorPair, 2)

	logger := log.TestingLogger()
	reactorPairs[0] = newBlockchainReactorPair(logger, genDoc, privVals, maxBlockHeight)
	reactorPairs[1] = newBlockchainReactorPair(logger, genDoc, privVals, 0)

	p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
		moduleName := fmt.Sprintf("blockchain-%v", i)
		reactorPairs[i].bcR.SetLogger(logger.With("module", moduleName))

		return s

	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			_ = r.bcR.Stop()
			_ = r.conR.Stop()
		}
	}()

	tests := []struct {
		height   int64
		existent bool
	}{
		{maxBlockHeight + 2, false},
		{10, true},
		{1, true},
		{maxBlockHeight + 100, false},
	}

	for {
		time.Sleep(10 * time.Millisecond)
		reactorPairs[1].conR.mtx.Lock()
		if reactorPairs[1].conR.switchedToConsensus {
			reactorPairs[1].conR.mtx.Unlock()
			break
		}
		reactorPairs[1].conR.mtx.Unlock()
	}

	assert.Equal(t, maxBlockHeight, reactorPairs[0].bcR.store.Height())

	for _, tt := range tests {
		block := reactorPairs[1].bcR.store.LoadBlock(tt.height)
		if tt.existent {
			assert.True(t, block != nil)
		} else {
			assert.True(t, block == nil)
		}
	}
}

// NOTE: This is too hard to test without
// an easy way to add a test peer to the switch,
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn, but
// that seems extreme.
func TestFastSyncBadBlockStopsPeer(t *testing.T) {
	numNodes := 4
	maxBlockHeight := int64(148)

	config = cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	otherChain := newBlockchainReactorPair(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
	defer func() {
		_ = otherChain.bcR.Stop()
		_ = otherChain.conR.Stop()
	}()

	reactorPairs := make([]BlockchainReactorPair, numNodes)
	logger := make([]log.Logger, numNodes)

	for i := 0; i < numNodes; i++ {
		logger[i] = log.TestingLogger()
		height := int64(0)
		if i == 0 {
			height = maxBlockHeight
		}
		reactorPairs[i] = newBlockchainReactorPair(logger[i], genDoc, privVals, height)
	}

	switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch {
		reactorPairs[i].conR.mtx.Lock()
		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
		moduleName := fmt.Sprintf("blockchain-%v", i)
		reactorPairs[i].bcR.SetLogger(logger[i].With("module", moduleName))
		reactorPairs[i].conR.mtx.Unlock()
		return s

	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			_ = r.bcR.Stop()
			_ = r.conR.Stop()
		}
	}()

outerFor:
	for {
		time.Sleep(10 * time.Millisecond)
		for i := 0; i < numNodes; i++ {
			reactorPairs[i].conR.mtx.Lock()
			if !reactorPairs[i].conR.switchedToConsensus {
				reactorPairs[i].conR.mtx.Unlock()
				continue outerFor
			}
			reactorPairs[i].conR.mtx.Unlock()
		}
		break
	}

	// at this point reactorPairs[0-3] are all caught up with the newest height
	assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size())

	// mark the last reactorPair as an invalid peer
	reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store

	lastLogger := log.TestingLogger()
	lastReactorPair := newBlockchainReactorPair(lastLogger, genDoc, privVals, 0)
	reactorPairs = append(reactorPairs, lastReactorPair)

	switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[len(reactorPairs)-1].conR)
		moduleName := fmt.Sprintf("blockchain-%v", len(reactorPairs)-1)
		reactorPairs[len(reactorPairs)-1].bcR.SetLogger(lastLogger.With("module", moduleName))
		return s

	}, p2p.Connect2Switches)...)

	for i := 0; i < len(reactorPairs)-1; i++ {
		p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
	}

	for {
		time.Sleep(1 * time.Second)
		lastReactorPair.conR.mtx.Lock()
		if lastReactorPair.conR.switchedToConsensus {
			lastReactorPair.conR.mtx.Unlock()
			break
		}
		lastReactorPair.conR.mtx.Unlock()

		if lastReactorPair.bcR.Switch.Peers().Size() == 0 {
			break
		}
	}

	assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
}

func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName      string
		requestHeight int64
		expectErr     bool
	}{
		{"Valid Request Message", 0, false},
		{"Valid Request Message", 1, false},
		{"Invalid Request Message", -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			request := bcBlockRequestMessage{Height: tc.requestHeight}
			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName          string
		nonResponseHeight int64
		expectErr         bool
	}{
		{"Valid Non-Response Message", 0, false},
		{"Valid Non-Response Message", 1, false},
		{"Invalid Non-Response Message", -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight}
			assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcStatusRequestMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName      string
		requestHeight int64
		expectErr     bool
	}{
		{"Valid Request Message", 0, false},
		{"Valid Request Message", 1, false},
		{"Invalid Request Message", -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			request := bcStatusRequestMessage{Height: tc.requestHeight}
			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName       string
		responseHeight int64
		expectErr      bool
	}{
		{"Valid Response Message", 0, false},
		{"Valid Response Message", 1, false},
		{"Invalid Response Message", -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			response := bcStatusResponseMessage{Height: tc.responseHeight}
			assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
	}
	return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
	return block
}

type testApp struct {
	abci.BaseApplication
}
blockchain/v2/metrics.go (new file, 124 lines)
@@ -0,0 +1,124 @@
package v2

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

const (
	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
	// package.
	MetricsSubsystem = "blockchain"
)

// Metrics contains metrics exposed by this package.
type Metrics struct {
	// events_in
	EventsIn metrics.Counter
	// events_handled
	EventsHandled metrics.Counter
	// events_out
	EventsOut metrics.Counter
	// errors_in
	ErrorsIn metrics.Counter
	// errors_handled
	ErrorsHandled metrics.Counter
	// errors_out
	ErrorsOut metrics.Counter
	// events_shed
	EventsShed metrics.Counter
	// events_sent
	EventsSent metrics.Counter
	// errors_sent
	ErrorsSent metrics.Counter
	// errors_shed
	ErrorsShed metrics.Counter
}

// PrometheusMetrics returns metrics backed by the Prometheus client library.
// Can we burn in the routine name here?
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
	labels := []string{}
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
	}
	return &Metrics{
		EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_in",
			Help:      "Events read from the channel.",
		}, labels).With(labelsAndValues...),
		EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_handled",
			Help:      "Events handled.",
		}, labels).With(labelsAndValues...),
		EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_out",
			Help:      "Events output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_in",
			Help:      "Errors read from the channel.",
		}, labels).With(labelsAndValues...),
		ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_handled",
			Help:      "Errors handled.",
		}, labels).With(labelsAndValues...),
		ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_out",
			Help:      "Errors output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_sent",
			Help:      "Errors sent to routine.",
		}, labels).With(labelsAndValues...),
		ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_shed",
			Help:      "Errors dropped from sending.",
		}, labels).With(labelsAndValues...),
		EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_sent",
			Help:      "Events sent to routine.",
		}, labels).With(labelsAndValues...),
		EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_shed",
			Help:      "Events dropped from sending.",
		}, labels).With(labelsAndValues...),
	}
}

// NopMetrics returns no-op Metrics.
func NopMetrics() *Metrics {
	return &Metrics{
		EventsIn:      discard.NewCounter(),
		EventsHandled: discard.NewCounter(),
		EventsOut:     discard.NewCounter(),
		ErrorsIn:      discard.NewCounter(),
		ErrorsHandled: discard.NewCounter(),
		ErrorsOut:     discard.NewCounter(),
		EventsShed:    discard.NewCounter(),
		EventsSent:    discard.NewCounter(),
		ErrorsSent:    discard.NewCounter(),
		ErrorsShed:    discard.NewCounter(),
	}
}
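A typical way to wire these two constructors is to pick the Prometheus-backed metrics when instrumentation is enabled and fall back to the no-op set otherwise. The sketch below is illustrative only; metricsFor is a hypothetical helper, and the namespace and "chain_id" label are assumptions, not values taken from this diff:

	// metricsFor is a hypothetical helper; callers of this package decide
	// the namespace and label set.
	func metricsFor(prometheusOn bool) *Metrics {
		if prometheusOn {
			// Each label name must be followed by its value in labelsAndValues.
			return PrometheusMetrics("tendermint", "chain_id", "test-chain")
		}
		return NopMetrics()
	}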
blockchain/v2/processor.go (new file, 187 lines)
@@ -0,0 +1,187 @@
package v2

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p"
	tdState "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

type peerError struct {
	priorityHigh
	peerID p2p.ID
}

type pcDuplicateBlock struct {
	priorityNormal
}

type pcShortBlock struct {
	priorityNormal
}

type pcBlockVerificationFailure struct {
	priorityNormal
	height       int64
	firstPeerID  p2p.ID
	secondPeerID p2p.ID
}

type pcBlockProcessed struct {
	priorityNormal
	height int64
	peerID p2p.ID
}

type pcProcessBlock struct {
	priorityNormal
}

type pcStop struct {
	priorityNormal
}

type pcFinished struct {
	priorityNormal
	height       int64
	blocksSynced int64
}

func (p pcFinished) Error() string {
	return "finished"
}

type queueItem struct {
	block  *types.Block
	peerID p2p.ID
}

type blockQueue map[int64]queueItem

type pcState struct {
	height       int64      // height of the last synced block
	queue        blockQueue // blocks waiting to be processed
	chainID      string
	blocksSynced int64
	draining     bool
	tdState      tdState.State
	context      processorContext
}

func (state *pcState) String() string {
	return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d",
		state.height, len(state.queue), state.draining, state.blocksSynced)
}

// newPcState returns a pcState initialized at the given height, i.e. the height of the last verified block
func newPcState(initHeight int64, tdState tdState.State, chainID string, context processorContext) *pcState {
	return &pcState{
		height:       initHeight,
		queue:        blockQueue{},
		chainID:      chainID,
		draining:     false,
		blocksSynced: 0,
		context:      context,
		tdState:      tdState,
	}
}

// nextTwo returns the next two unverified blocks
func (state *pcState) nextTwo() (queueItem, queueItem, error) {
	if first, ok := state.queue[state.height+1]; ok {
		if second, ok := state.queue[state.height+2]; ok {
			return first, second, nil
		}
	}
	return queueItem{}, queueItem{}, fmt.Errorf("not found")
}

// synced returns true when at most the last verified block remains in the queue
func (state *pcState) synced() bool {
	return len(state.queue) <= 1
}

func (state *pcState) advance() {
	state.height++
	delete(state.queue, state.height)
	state.blocksSynced++
}

func (state *pcState) enqueue(peerID p2p.ID, block *types.Block, height int64) error {
	if _, ok := state.queue[height]; ok {
		return fmt.Errorf("duplicate queue item")
	}
	state.queue[height] = queueItem{block: block, peerID: peerID}
	return nil
}

// purgePeer removes all unprocessed blocks received from the given peer from the queue
func (state *pcState) purgePeer(peerID p2p.ID) {
	// what if height is less than state.height?
	for height, item := range state.queue {
		if item.peerID == peerID {
			delete(state.queue, height)
		}
	}
}

// handle processes FSM events
func (state *pcState) handle(event Event) (Event, error) {
	switch event := event.(type) {
	case *scBlockReceived:
		if event.block == nil {
			panic("processor received an event with a nil block")
		}
		if event.block.Height <= state.height {
			return pcShortBlock{}, nil
		}
		err := state.enqueue(event.peerID, event.block, event.block.Height)
		if err != nil {
			return pcDuplicateBlock{}, nil
		}

	case pcProcessBlock:
		firstItem, secondItem, err := state.nextTwo()
		if err != nil {
			if state.draining {
				return noOp, pcFinished{height: state.height}
			}
			return noOp, nil
		}
		first, second := firstItem.block, secondItem.block

		firstParts := first.MakePartSet(types.BlockPartSizeBytes)
		firstPartsHeader := firstParts.Header()
		firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}

		err = state.context.verifyCommit(state.chainID, firstID, first.Height, second.LastCommit)
		if err != nil {
			state.purgePeer(firstItem.peerID)
			state.purgePeer(secondItem.peerID)
			return pcBlockVerificationFailure{
					height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID},
				nil
		}

		state.context.saveBlock(first, firstParts, second.LastCommit)

		state.tdState, err = state.context.applyBlock(state.tdState, firstID, first)
		if err != nil {
			panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
		}
		state.advance()
		return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil

	case *peerError:
		state.purgePeer(event.peerID)

	case pcStop:
		if state.synced() {
			return noOp, pcFinished{height: state.height, blocksSynced: state.blocksSynced}
		}
		state.draining = true
	}

	return noOp, nil
}
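The pcProcessBlock branch above encodes the two-block rule: block H+1 can only be verified once block H+2 has arrived, because H+2's LastCommit carries the signatures for H+1. A minimal sketch of one processing round, assuming the mock context from processor_context.go below (the peer IDs and chain ID are arbitrary):

	// processOneBlock is a hypothetical snippet, not part of this diff.
	func processOneBlock() {
		// Start at height 0 with a context that never fails verification or application.
		st := newPcState(0, tdState.State{}, "test-chain", newMockProcessorContext(nil, nil))

		// Enqueue blocks at heights 1 and 2; height 2 supplies the commit for height 1.
		_ = st.enqueue("P1", &types.Block{Header: types.Header{Height: 1}}, 1)
		_ = st.enqueue("P2", &types.Block{Header: types.Header{Height: 2}}, 2)

		// handle verifies block 1 against block 2's LastCommit, applies it,
		// advances st.height to 1, and emits pcBlockProcessed{height: 1, peerID: "P1"};
		// only block 2 remains queued.
		ev, err := st.handle(pcProcessBlock{})
		_, _ = ev, err
	}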
blockchain/v2/processor_context.go (new file, 76 lines)
@@ -0,0 +1,76 @@
package v2

import (
	"fmt"

	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

type processorContext interface {
	applyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error)
	verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
	saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
}

// nolint:unused
type pContext struct {
	store    *store.BlockStore
	executor *state.BlockExecutor
	state    *state.State
}

// nolint:unused,deadcode
func newProcessorContext(st *store.BlockStore, ex *state.BlockExecutor, s *state.State) *pContext {
	return &pContext{
		store:    st,
		executor: ex,
		state:    s,
	}
}

func (pc *pContext) applyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error) {
	return pc.executor.ApplyBlock(state, blockID, block)
}

func (pc *pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
	return pc.state.Validators.VerifyCommit(chainID, blockID, height, commit)
}

func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
	pc.store.SaveBlock(block, blockParts, seenCommit)
}

// mockPContext is a processorContext for tests; heights listed in the
// blacklists fail verification or application, respectively.
type mockPContext struct {
	applicationBL  []int64
	verificationBL []int64
}

func newMockProcessorContext(verificationBlackList []int64, applicationBlackList []int64) *mockPContext {
	return &mockPContext{
		applicationBL:  applicationBlackList,
		verificationBL: verificationBlackList,
	}
}

func (mpc *mockPContext) applyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error) {
	for _, h := range mpc.applicationBL {
		if h == block.Height {
			return state, fmt.Errorf("generic application error")
		}
	}
	return state, nil
}

func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
	for _, h := range mpc.verificationBL {
		if h == height {
			return fmt.Errorf("generic verification error")
		}
	}
	return nil
}

func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
}
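The two blacklists make failure injection trivial in the processor tests below: a height listed in verificationBL fails verifyCommit, and one listed in applicationBL fails applyBlock. A small sketch (the heights and chain ID are arbitrary, and failAtHeightFive is a hypothetical snippet, not part of this diff):

	func failAtHeightFive() {
		// Fail verification at height 5 only.
		ctx := newMockProcessorContext([]int64{5}, nil)
		err5 := ctx.verifyCommit("test-chain", types.BlockID{}, 5, nil) // non-nil: "generic verification error"
		err6 := ctx.verifyCommit("test-chain", types.BlockID{}, 6, nil) // nil: height 6 is not blacklisted
		_, _ = err5, err6
	}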
blockchain/v2/processor_test.go (new file, 356 lines)
@@ -0,0 +1,356 @@
package v2

import (
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/tendermint/tendermint/p2p"
	tdState "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
)

// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability.
type pcBlock struct {
	pid    string
	height int64
}

// params is a test structure used to create processor state.
type params struct {
	height       int64
	items        []pcBlock
	blocksSynced int64
	verBL        []int64
	appBL        []int64
	draining     bool
}

// makePcBlock makes an empty block.
func makePcBlock(height int64) *types.Block {
	return &types.Block{Header: types.Header{Height: height}}
}

// makeState takes test parameters and creates a specific processor state.
func makeState(p *params) *pcState {
	var (
		tdState = tdState.State{}
		context = newMockProcessorContext(p.verBL, p.appBL)
	)
	state := newPcState(p.height, tdState, "test", context)

	for _, item := range p.items {
		_ = state.enqueue(p2p.ID(item.pid), makePcBlock(item.height), item.height)
	}

	state.blocksSynced = p.blocksSynced
	state.draining = p.draining
	return state
}

func mBlockResponse(peerID p2p.ID, height int64) *scBlockReceived {
	return &scBlockReceived{
		peerID: peerID,
		block:  makePcBlock(height),
	}
}

type pcFsmMakeStateValues struct {
	currentState  *params
	event         Event
	wantState     *params
	wantNextEvent Event
	wantErr       error
	wantPanic     bool
}

type testFields struct {
	name  string
	steps []pcFsmMakeStateValues
}

func executeProcessorTests(t *testing.T, tests []testFields) {
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			var state *pcState
			for _, step := range tt.steps {
				defer func() {
					r := recover()
					if (r != nil) != step.wantPanic {
						t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic)
					}
				}()

				// The first step must always initialise the currentState as state.
				if step.currentState != nil {
					state = makeState(step.currentState)
				}
				if state == nil {
					panic("Bad (initial?) step")
				}

				nextEvent, err := state.handle(step.event)
				t.Log(state)
				assert.Equal(t, step.wantErr, err)
				assert.Equal(t, makeState(step.wantState), state)
				assert.Equal(t, step.wantNextEvent, nextEvent)
				// The next step may use the wantState as its currentState.
				state = makeState(step.wantState)
			}
		})
	}
}

func TestPcBlockResponse(t *testing.T) {
	tests := []testFields{

		{
			name: "add one block",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{}, event: mBlockResponse("P1", 1),
					wantState: &params{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp,
				},
			},
		},
		{
			name: "add two blocks",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{}, event: mBlockResponse("P1", 3),
					wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp,
				},
				{ // use previous wantState as currentState,
					event:     mBlockResponse("P1", 4),
					wantState: &params{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp,
				},
			},
		},
		{
			name: "add duplicate block from same peer",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{}, event: mBlockResponse("P1", 3),
					wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp,
				},
				{ // use previous wantState as currentState,
					event:     mBlockResponse("P1", 3),
					wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: pcDuplicateBlock{},
				},
			},
		},
		{
			name: "add duplicate block from different peer",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{}, event: mBlockResponse("P1", 3),
					wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp,
				},
				{ // use previous wantState as currentState,
					event:     mBlockResponse("P2", 3),
					wantState: &params{items: []pcBlock{{"P1", 3}}}, wantNextEvent: pcDuplicateBlock{},
				},
			},
		},
		{
			name: "attempt to add block with height equal to state.height",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{height: 2, items: []pcBlock{{"P1", 3}}}, event: mBlockResponse("P1", 2),
					wantState: &params{height: 2, items: []pcBlock{{"P1", 3}}}, wantNextEvent: pcShortBlock{},
				},
			},
		},
		{
			name: "attempt to add block with height smaller than state.height",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{height: 2, items: []pcBlock{{"P1", 3}}}, event: mBlockResponse("P1", 1),
					wantState: &params{height: 2, items: []pcBlock{{"P1", 3}}}, wantNextEvent: pcShortBlock{},
				},
			},
		},
	}

	executeProcessorTests(t, tests)
}

func TestPcProcessBlockSuccess(t *testing.T) {
	tests := []testFields{
		{
			name: "noop - no blocks over current height",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{}, event: pcProcessBlock{},
					wantState: &params{}, wantNextEvent: noOp,
				},
			},
		},
		{
			name: "noop - high new blocks",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: pcProcessBlock{},
					wantState: &params{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp,
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present",
			steps: []pcFsmMakeStateValues{
				{
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: pcProcessBlock{},
					wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1},
					wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present after draining",
			steps: []pcFsmMakeStateValues{
				{ // some contiguous blocks - on stop check draining is set
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}}, event: pcStop{},
					wantState:     &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true},
					wantNextEvent: noOp,
				},
				{
					event:         pcProcessBlock{},
					wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
					wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"},
				},
				{ // finish when H+1 or/and H+2 are missing
					event:         pcProcessBlock{},
					wantState:     &params{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true},
					wantNextEvent: noOp,
					wantErr:       pcFinished{height: 1},
				},
			},
		},
	}

	executeProcessorTests(t, tests)
}

func TestPcProcessBlockFailures(t *testing.T) {
	tests := []testFields{
		{
			name: "blocks H+1 and H+2 present from different peers - H+1 verification fails",
			steps: []pcFsmMakeStateValues{
				{
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: pcProcessBlock{},
					wantState:     &params{items: []pcBlock{}, verBL: []int64{1}},
					wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"},
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: pcProcessBlock{},
					wantState: &params{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true,
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present from same peers - H+1 verification fails",
			steps: []pcFsmMakeStateValues{
				{
					currentState:  &params{items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}}, verBL: []int64{1}},
					event:         pcProcessBlock{},
					wantState:     &params{items: []pcBlock{{"P2", 3}}, verBL: []int64{1}},
					wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"},
				},
			},
		},
		{
			name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails",
			steps: []pcFsmMakeStateValues{
				{
					currentState: &params{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}},
					event:        pcProcessBlock{},
					wantState: &params{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true,
				},
			},
		},
	}

	executeProcessorTests(t, tests)
}

func TestPcPeerError(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "peer not present",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: &peerError{peerID: "P3"},
|
||||
wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}},
|
||||
wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "some blocks are from errored peer",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 100}, {"P1", 99}, {"P2", 101}}}, event: &peerError{peerID: "P1"},
|
||||
wantState: ¶ms{items: []pcBlock{{"P2", 101}}},
|
||||
wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "all blocks are from errored peer",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{items: []pcBlock{{"P1", 100}, {"P1", 99}}}, event: &peerError{peerID: "P1"},
|
||||
wantState: ¶ms{},
|
||||
wantNextEvent: noOp,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeProcessorTests(t, tests)
|
||||
}
|
||||
|
||||
func TestStop(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "no blocks",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: pcStop{},
|
||||
wantState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100},
|
||||
wantNextEvent: noOp,
|
||||
wantErr: pcFinished{height: 100, blocksSynced: 100},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "maxHeight+1 block present",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100}, event: pcStop{},
|
||||
wantState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100},
|
||||
wantNextEvent: noOp,
|
||||
wantErr: pcFinished{height: 100, blocksSynced: 100},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "more blocks present",
|
||||
steps: []pcFsmMakeStateValues{
|
||||
{
|
||||
currentState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: pcStop{},
|
||||
wantState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true},
|
||||
wantNextEvent: noOp,
|
||||
wantErr: nil,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeProcessorTests(t, tests)
|
||||
}
|
||||
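These processor tests all follow one table-driven pattern: each step feeds a single event into the FSM and asserts the resulting state, next event, and error, with state persisting across steps. A minimal, self-contained sketch of such a step runner (the types and runner below are illustrative assumptions, not the file's actual executeProcessorTests):

package example

import "testing"

// toy FSM: accepts a block only at height+1, mirroring the processor's rule.
type fsm struct{ height int64 }

type step struct {
	event      int64 // hypothetical event: a block height offered to the FSM
	wantHeight int64 // expected state after handling the event
}

func runSteps(t *testing.T, name string, steps []step) {
	t.Run(name, func(t *testing.T) {
		m := &fsm{} // state persists across steps, as in the tests above
		for i, s := range steps {
			if s.event == m.height+1 {
				m.height = s.event
			}
			if m.height != s.wantHeight {
				t.Fatalf("step %d: height = %d, want %d", i, m.height, s.wantHeight)
			}
		}
	})
}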
blockchain/v2/reactor.go (new file, 118 lines)
@@ -0,0 +1,118 @@
package v2

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/libs/log"
)

type timeCheck struct {
	priorityHigh
	time time.Time
}

func schedulerHandle(event Event) (Event, error) {
	if _, ok := event.(timeCheck); ok {
		fmt.Println("scheduler handle timeCheck")
	}
	return noOp, nil
}

func processorHandle(event Event) (Event, error) {
	if _, ok := event.(timeCheck); ok {
		fmt.Println("processor handle timeCheck")
	}
	return noOp, nil
}

type Reactor struct {
	events    chan Event
	stopDemux chan struct{}
	scheduler *Routine
	processor *Routine
	ticker    *time.Ticker
	logger    log.Logger
}

func NewReactor(bufferSize int) *Reactor {
	return &Reactor{
		events:    make(chan Event, bufferSize),
		stopDemux: make(chan struct{}),
		scheduler: newRoutine("scheduler", schedulerHandle, bufferSize),
		processor: newRoutine("processor", processorHandle, bufferSize),
		ticker:    time.NewTicker(1 * time.Second),
		logger:    log.NewNopLogger(),
	}
}

// nolint:unused
func (r *Reactor) setLogger(logger log.Logger) {
	r.logger = logger
	r.scheduler.setLogger(logger)
	r.processor.setLogger(logger)
}

func (r *Reactor) Start() {
	go r.scheduler.start()
	go r.processor.start()
	go r.demux()

	<-r.scheduler.ready()
	<-r.processor.ready()

	go func() {
		for t := range r.ticker.C {
			r.events <- timeCheck{time: t}
		}
	}()
}

// XXX: How to make this deterministic?
// XXX: Would it be possible here to provide some kind of type safety for the types
// of events that each routine can produce and consume?
func (r *Reactor) demux() {
	for {
		select {
		case event := <-r.events:
			// XXX: check for backpressure
			r.scheduler.send(event)
			r.processor.send(event)
		case <-r.stopDemux:
			r.logger.Info("demuxing stopped")
			return
		case event := <-r.scheduler.next():
			r.processor.send(event)
		case event := <-r.processor.next():
			r.scheduler.send(event)
		case err := <-r.scheduler.final():
			r.logger.Info(fmt.Sprintf("scheduler final %s", err))
		case err := <-r.processor.final():
			r.logger.Info(fmt.Sprintf("processor final %s", err))
			// XXX: switch to consensus
		}
	}
}

func (r *Reactor) Stop() {
	r.logger.Info("reactor stopping")

	r.ticker.Stop()
	r.scheduler.stop()
	r.processor.stop()
	close(r.stopDemux)
	close(r.events)

	r.logger.Info("reactor stopped")
}

func (r *Reactor) Receive(event Event) {
	// XXX: decode and serialize write events
	// TODO: backpressure
	r.events <- event
}

func (r *Reactor) AddPeer() {
	// TODO: add peer event and send to demuxer
}
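The demux loop above is a single select that fans external events out to both routines and cross-feeds each routine's output to its sibling. A stripped-down, self-contained sketch of the same pattern with plain channels (all names here are illustrative, not the reactor's own code):

package main

import "fmt"

// demux fans input out to two workers and forwards each worker's
// output to its sibling, until stop is closed.
func demux(in, aIn, bIn, aOut, bOut chan string, stop chan struct{}) {
	for {
		select {
		case ev := <-in:
			aIn <- ev // fan out to both workers
			bIn <- ev
		case ev := <-aOut: // cross-feed worker outputs
			bIn <- ev
		case ev := <-bOut:
			aIn <- ev
		case <-stop:
			return
		}
	}
}

func main() {
	in := make(chan string, 1)
	aIn, bIn := make(chan string, 2), make(chan string, 2)
	aOut, bOut := make(chan string), make(chan string)
	stop := make(chan struct{})

	go demux(in, aIn, bIn, aOut, bOut, stop)
	in <- "tick"
	fmt.Println(<-aIn, <-bIn) // both workers see the external event
	close(stop)
}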
blockchain/v2/reactor_test.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package v2

import (
	"testing"
)

func TestReactor(t *testing.T) {
	var (
		bufferSize = 10
		reactor    = NewReactor(bufferSize)
	)

	reactor.Start()
	script := []Event{
		// TODO
	}

	for _, event := range script {
		reactor.Receive(event)
	}
	reactor.Stop()
}
blockchain/v2/routine.go (new file, 134 lines)
@@ -0,0 +1,134 @@
package v2

import (
	"fmt"
	"sync/atomic"

	"github.com/Workiva/go-datastructures/queue"
	"github.com/tendermint/tendermint/libs/log"
)

type handleFunc = func(event Event) (Event, error)

// A Routine models a finite state machine as a serialized stream of events
// processed by a handle function. The Routine structure handles the
// concurrency and messaging guarantees. Events sent via `send` are handled
// by the `handle` function to produce an iterator `next()`. Calling `stop()`
// on a routine will conclude processing of all sent events and produce a
// `final()` event representing the terminal state.
type Routine struct {
	name    string
	handle  handleFunc
	queue   *queue.PriorityQueue
	out     chan Event
	fin     chan error
	rdy     chan struct{}
	running *uint32
	logger  log.Logger
	metrics *Metrics
}

func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
	return &Routine{
		name:    name,
		handle:  handleFunc,
		queue:   queue.NewPriorityQueue(bufferSize, true),
		out:     make(chan Event, bufferSize),
		rdy:     make(chan struct{}, 1),
		fin:     make(chan error, 1),
		running: new(uint32),
		logger:  log.NewNopLogger(),
		metrics: NopMetrics(),
	}
}

// nolint: unused
func (rt *Routine) setLogger(logger log.Logger) {
	rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
	rt.metrics = metrics
}

func (rt *Routine) start() {
	rt.logger.Info(fmt.Sprintf("%s: run\n", rt.name))
	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
	if !running {
		panic(fmt.Sprintf("%s is already running", rt.name))
	}
	close(rt.rdy)
	defer func() {
		stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
		if !stopped {
			panic(fmt.Sprintf("%s failed to stop", rt.name))
		}
	}()

	for {
		events, err := rt.queue.Get(1)
		if err != nil {
			rt.logger.Info(fmt.Sprintf("%s: stopping\n", rt.name))
			rt.terminate(fmt.Errorf("stopped"))
			return
		}
		oEvent, err := rt.handle(events[0].(Event))
		rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
		if err != nil {
			rt.terminate(err)
			return
		}
		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
		rt.logger.Debug(fmt.Sprintf("%s produced %T %+v\n", rt.name, oEvent, oEvent))

		rt.out <- oEvent
	}
}

// XXX: look into returning OpError in the net package
func (rt *Routine) send(event Event) bool {
	rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
	if !rt.isRunning() {
		return false
	}
	err := rt.queue.Put(event)
	if err != nil {
		rt.metrics.EventsShed.With("routine", rt.name).Add(1)
		rt.logger.Info(fmt.Sprintf("%s: send failed, queue was full/stopped\n", rt.name))
		return false
	}
	rt.metrics.EventsSent.With("routine", rt.name).Add(1)
	return true
}

func (rt *Routine) isRunning() bool {
	return atomic.LoadUint32(rt.running) == 1
}

func (rt *Routine) next() chan Event {
	return rt.out
}

func (rt *Routine) ready() chan struct{} {
	return rt.rdy
}

func (rt *Routine) stop() {
	if !rt.isRunning() { // XXX: this should check rt.queue.Disposed()
		return
	}

	rt.logger.Info(fmt.Sprintf("%s: stop\n", rt.name))
	rt.queue.Dispose() // this should block until all queue items are free?
}

func (rt *Routine) final() chan error {
	return rt.fin
}

// XXX: Maybe get rid of this
func (rt *Routine) terminate(reason error) {
	close(rt.out)
	rt.fin <- reason
}
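start() above guards against double-start with an atomic compare-and-swap on the running flag, and the deferred swap flips it back on exit. The same idiom in isolation, as a self-contained sketch rather than the package's code:

package main

import (
	"fmt"
	"sync/atomic"
)

type guarded struct{ running uint32 }

// run flips running 0->1 on entry and back to 0 on exit; a concurrent
// second call observes the failed swap and refuses to start.
func (g *guarded) run() bool {
	if !atomic.CompareAndSwapUint32(&g.running, 0, 1) {
		return false // already running
	}
	defer atomic.StoreUint32(&g.running, 0)
	// ... do work ...
	return true
}

func main() {
	g := &guarded{}
	fmt.Println(g.run()) // true
	fmt.Println(g.run()) // true again: the first call has already finished
}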
blockchain/v2/routine_test.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package v2

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

type eventA struct {
	priorityNormal
}

var errDone = fmt.Errorf("done")

func simpleHandler(event Event) (Event, error) {
	if _, ok := event.(eventA); ok {
		return noOp, errDone
	}
	return noOp, nil
}

func TestRoutineFinal(t *testing.T) {
	var (
		bufferSize = 10
		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
	)

	assert.False(t, routine.isRunning(),
		"expected an initialized routine to not be running")
	go routine.start()
	<-routine.ready()
	assert.True(t, routine.isRunning(),
		"expected a started routine to be running")

	assert.True(t, routine.send(eventA{}),
		"expected sending to a ready routine to succeed")

	assert.Equal(t, errDone, <-routine.final(),
		"expected the final event to be done")

	assert.False(t, routine.isRunning(),
		"expected a completed routine to no longer be running")
}

func TestRoutineStop(t *testing.T) {
	var (
		bufferSize = 10
		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
	)

	assert.False(t, routine.send(eventA{}),
		"expected sending to an unstarted routine to fail")

	go routine.start()
	<-routine.ready()

	assert.True(t, routine.send(eventA{}),
		"expected sending to a running routine to succeed")

	routine.stop()

	assert.False(t, routine.send(eventA{}),
		"expected sending to a stopped routine to fail")
}

type finalCount struct {
	count int
}

func (f finalCount) Error() string {
	return "end"
}

func genStatefulHandler(maxCount int) handleFunc {
	counter := 0
	return func(event Event) (Event, error) {
		if _, ok := event.(eventA); ok {
			counter++
			if counter >= maxCount {
				return noOp, finalCount{counter}
			}

			return eventA{}, nil
		}
		return noOp, nil
	}
}

func feedback(r *Routine) {
	for event := range r.next() {
		r.send(event)
	}
}

func TestStatefulRoutine(t *testing.T) {
	var (
		count      = 10
		handler    = genStatefulHandler(count)
		bufferSize = 20
		routine    = newRoutine("statefulRoutine", handler, bufferSize)
	)

	go routine.start()
	go feedback(routine)
	<-routine.ready()

	assert.True(t, routine.send(eventA{}),
		"expected sending to a started routine to succeed")

	final := <-routine.final()
	if fnl, ok := final.(finalCount); ok {
		assert.Equal(t, count, fnl.count,
			"expected the routine to count to 10")
	} else {
		t.Fail()
	}
}

type lowPriorityEvent struct {
	priorityLow
}

type highPriorityEvent struct {
	priorityHigh
}

func handleWithPriority(event Event) (Event, error) {
	switch event.(type) {
	case lowPriorityEvent:
		return noOp, nil
	case highPriorityEvent:
		return noOp, errDone
	}
	return noOp, nil
}

func TestPriority(t *testing.T) {
	var (
		bufferSize = 20
		routine    = newRoutine("priorityRoutine", handleWithPriority, bufferSize)
	)

	go routine.start()
	<-routine.ready()
	go func() {
		for {
			routine.send(lowPriorityEvent{})
			time.Sleep(1 * time.Millisecond)
		}
	}()
	time.Sleep(10 * time.Millisecond)

	assert.True(t, routine.isRunning(),
		"expected a started routine to be running")
	assert.True(t, routine.send(highPriorityEvent{}),
		"expected send to succeed even when saturated")

	assert.Equal(t, errDone, <-routine.final())
	assert.False(t, routine.isRunning(),
		"expected the routine to stop after handling the high-priority event")
}
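TestStatefulRoutine above wires the routine's output back into its input via feedback, so a single seed event drives the counter to maxCount. The same self-feeding loop reduced to plain channels (a standalone sketch, not the package's code):

package main

import "fmt"

func main() {
	events := make(chan int, 1)
	done := make(chan int)

	// handler: re-emit each event until the counter reaches 10,
	// then report the final count instead of feeding back.
	go func() {
		counter := 0
		for range events {
			counter++
			if counter >= 10 {
				done <- counter
				return
			}
			events <- 0 // feed the output back as the next input
		}
	}()

	events <- 0         // seed event
	fmt.Println(<-done) // 10
}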
blockchain/v2/scheduler.go (new file, 718 lines)
@@ -0,0 +1,718 @@
package v2

import (
	"bytes"
	"fmt"
	"math"
	"sort"
	"time"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// Events

// XXX: The handle API would be much simpler if it returned a single Event,
// which embeds a terminationEvent if it wants to terminate the routine.

// Input events into the scheduler:
// ticker event for cleaning peers
type tryPrunePeer struct {
	priorityHigh
	time time.Time
}

// ticker event for scheduling block requests
type trySchedule struct {
	priorityHigh
	time time.Time
}

// blockResponse message received from a peer
type bcBlockResponse struct {
	priorityNormal
	time   time.Time
	peerID p2p.ID
	height int64
	size   int64
	block  *types.Block
}

// statusResponse message received from a peer
type bcStatusResponse struct {
	priorityNormal
	time   time.Time
	peerID p2p.ID
	height int64
}

// new peer is connected
type addNewPeer struct {
	priorityNormal
	peerID p2p.ID
}

// Output events issued by the scheduler:
// all blocks have been processed
type scFinishedEv struct {
	priorityNormal
}

// send a blockRequest message
type scBlockRequest struct {
	priorityNormal
	peerID p2p.ID
	height int64
}

// a block has been received and validated by the scheduler
type scBlockReceived struct {
	priorityNormal
	peerID p2p.ID
	block  *types.Block
}

// scheduler detected a peer error
type scPeerError struct {
	priorityHigh
	peerID p2p.ID
	reason error
}

// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
	priorityHigh
	peers []p2p.ID
}

// XXX: make this fatal?
// scheduler encountered a fatal error
type scSchedulerFail struct {
	priorityHigh
	reason error
}

type blockState int

const (
	blockStateUnknown   blockState = iota + 1 // no known peer has this block
	blockStateNew                             // indicates that a peer has reported having this block
	blockStatePending                         // indicates that this block has been requested from a peer
	blockStateReceived                        // indicates that this block has been received from a peer
	blockStateProcessed                       // indicates that this block has been applied
)

func (e blockState) String() string {
	switch e {
	case blockStateUnknown:
		return "Unknown"
	case blockStateNew:
		return "New"
	case blockStatePending:
		return "Pending"
	case blockStateReceived:
		return "Received"
	case blockStateProcessed:
		return "Processed"
	default:
		return fmt.Sprintf("invalid blockState: %d", e)
	}
}

type peerState int

const (
	peerStateNew peerState = iota + 1
	peerStateReady
	peerStateRemoved
)

func (e peerState) String() string {
	switch e {
	case peerStateNew:
		return "New"
	case peerStateReady:
		return "Ready"
	case peerStateRemoved:
		return "Removed"
	default:
		panic(fmt.Sprintf("unknown peerState: %d", e))
	}
}

type scPeer struct {
	peerID p2p.ID

	// initialized as New when the peer is added, updated to Ready when a
	// statusUpdate is received, and updated to Removed when the peer is removed
	state peerState

	height      int64 // updated when a statusResponse is received
	lastTouched time.Time
	lastRate    int64 // last receive rate in bytes
}

func (p scPeer) String() string {
	return fmt.Sprintf("{state %v, height %d, lastTouched %v, lastRate %d, id %v}",
		p.state, p.height, p.lastTouched, p.lastRate, p.peerID)
}

func newScPeer(peerID p2p.ID) *scPeer {
	return &scPeer{
		peerID: peerID,
		state:  peerStateNew,
		height: -1,
	}
}

// The scheduler keeps track of the state of each block and each peer. The
// scheduler will attempt to schedule new block requests with `trySchedule`
// events and remove slow peers with `tryPrune` events.
type scheduler struct {
	initHeight int64

	// next block that needs to be processed. All blocks with smaller height are
	// in Processed state.
	height int64

	// a map of peerID to the scheduler-specific peer struct `scPeer` used to
	// keep track of peer-specific state
	peers       map[p2p.ID]*scPeer
	peerTimeout time.Duration
	minRecvRate int64 // minimum receive rate from a peer, otherwise prune

	// the maximum number of blocks that should be New, Received or Pending at any
	// point in time. This is used to enforce a limit on the blockStates map.
	targetPending int
	// a map of blocks to be scheduled (New), Pending or Received. Its length
	// should be smaller than targetPending.
	blockStates map[int64]blockState

	// a map of heights to the peer from which we are awaiting a response
	pendingBlocks map[int64]p2p.ID

	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time

	// a map of heights to the peers that put the block in blockStateReceived
	receivedBlocks map[int64]p2p.ID
}

func (sc scheduler) String() string {
	return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v",
		sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks)
}

func newScheduler(initHeight int64) *scheduler {
	sc := scheduler{
		initHeight:     initHeight,
		height:         initHeight + 1,
		blockStates:    make(map[int64]blockState),
		peers:          make(map[p2p.ID]*scPeer),
		pendingBlocks:  make(map[int64]p2p.ID),
		pendingTime:    make(map[int64]time.Time),
		receivedBlocks: make(map[int64]p2p.ID),
	}

	return &sc
}

func (sc *scheduler) addPeer(peerID p2p.ID) error {
	if _, ok := sc.peers[peerID]; ok {
		// In the future we should be able to add a previously removed peer
		return fmt.Errorf("cannot add duplicate peer %s", peerID)
	}
	sc.peers[peerID] = newScPeer(peerID)
	return nil
}

func (sc *scheduler) touchPeer(peerID p2p.ID, time time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("couldn't find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state)
	}

	peer.lastTouched = time

	return nil
}

func (sc *scheduler) removePeer(peerID p2p.ID) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("couldn't find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("tried to remove peer %s in peerStateRemoved", peerID)
	}

	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.pendingTime, height)
			delete(sc.pendingBlocks, height)
		}
	}

	for height, rcvPeerID := range sc.receivedBlocks {
		if rcvPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.receivedBlocks, height)
		}
	}

	// remove blocks from blockStates if the peer removal causes the max peer height to be lower
	peer.state = peerStateRemoved
	maxPeerHeight := int64(0)
	for _, otherPeer := range sc.peers {
		if otherPeer.state != peerStateReady {
			continue
		}
		if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight {
			maxPeerHeight = otherPeer.height
		}
	}
	for h := range sc.blockStates {
		if h > maxPeerHeight {
			delete(sc.blockStates, h)
		}
	}

	return nil
}

// check if the blockPool is running low and add new blocks in New state to be requested.
// This function is called when there is an increase in the maximum peer height or when
// blocks are processed.
func (sc *scheduler) addNewBlocks() {
	if len(sc.blockStates) >= sc.targetPending {
		return
	}

	for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ {
		if i > sc.maxHeight() {
			break
		}
		if sc.getStateAtHeight(i) == blockStateUnknown {
			sc.setStateAtHeight(i, blockStateNew)
		}
	}
}

func (sc *scheduler) setPeerHeight(peerID p2p.ID, height int64) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("cannot find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("cannot set peer height for a peer in peerStateRemoved")
	}

	if height < peer.height {
		return fmt.Errorf("cannot move peer height lower, from %d to %d", peer.height, height)
	}

	peer.height = height
	peer.state = peerStateReady

	sc.addNewBlocks()
	return nil
}

func (sc *scheduler) getStateAtHeight(height int64) blockState {
	if height <= sc.initHeight {
		return blockStateProcessed
	} else if state, ok := sc.blockStates[height]; ok {
		return state
	} else {
		return blockStateUnknown
	}
}

func (sc *scheduler) getPeersAtHeightOrAbove(height int64) []p2p.ID {
	peers := make([]p2p.ID, 0)
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if peer.height >= height {
			peers = append(peers, peer.peerID)
		}
	}
	return peers
}

func (sc *scheduler) peersInactiveSince(duration time.Duration, now time.Time) []p2p.ID {
	peers := []p2p.ID{}
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if now.Sub(peer.lastTouched) > duration {
			peers = append(peers, peer.peerID)
		}
	}

	// Ensure the order is deterministic for testing
	sort.Sort(PeerByID(peers))
	return peers
}

// returns peers whose lastRate is slower than minSpeed, denominated in bytes
func (sc *scheduler) peersSlowerThan(minSpeed int64) []p2p.ID {
	peers := []p2p.ID{}
	for peerID, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if peer.lastRate < minSpeed {
			peers = append(peers, peerID)
		}
	}

	// Ensure the order is deterministic for testing
	sort.Sort(PeerByID(peers))
	return peers
}

func (sc *scheduler) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []p2p.ID {
	prunable := []p2p.ID{}
	for peerID, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate {
			prunable = append(prunable, peerID)
		}
	}
	// Tests for handleTryPrunePeer() may fail without sort due to range non-determinism
	sort.Sort(PeerByID(prunable))
	return prunable
}

func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
	sc.blockStates[height] = state
}

func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("couldn't find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("cannot receive blocks from removed peer %s", peerID)
	}

	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
		return fmt.Errorf("received block %d from peer %s without being requested", height, peerID)
	}

	pendingTime, ok := sc.pendingTime[height]
	if !ok || now.Sub(pendingTime) <= 0 {
		return fmt.Errorf("clock error: block %d received at %s but requested at %s",
			height, now, pendingTime)
	}

	peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()

	sc.setStateAtHeight(height, blockStateReceived)
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)

	sc.receivedBlocks[height] = peerID

	return nil
}

func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateNew {
		return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
	}

	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("cannot find peer %s", peerID)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("cannot schedule %d from %s in %s", height, peerID, peer.state)
	}

	if height > peer.height {
		return fmt.Errorf("cannot request height %d from peer %s that is at height %d",
			height, peerID, peer.height)
	}

	sc.setStateAtHeight(height, blockStatePending)
	sc.pendingBlocks[height] = peerID
	// XXX: to make this more accurate we can introduce a message from
	// the IO routine which indicates the time the request was put on the wire
	sc.pendingTime[height] = time

	return nil
}

func (sc *scheduler) markProcessed(height int64) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateReceived {
		return fmt.Errorf("cannot mark height %d processed from block state %s", height, state)
	}

	sc.height++
	delete(sc.receivedBlocks, height)
	delete(sc.blockStates, height)
	sc.addNewBlocks()

	return nil
}
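markPending, markReceived and markProcessed above walk a height through the New -> Pending -> Received -> Processed lifecycle, each guarding on the expected predecessor state. A reduced, self-contained sketch of that guarded-transition pattern (the states mirror the ones above; the code is illustrative, not the scheduler itself):

package main

import "fmt"

type state int

const (
	stateNew state = iota + 1
	statePending
	stateReceived
	stateProcessed
)

// transition only advances a height from the expected predecessor state,
// just as markPending/markReceived/markProcessed do.
func transition(states map[int64]state, h int64, from, to state) error {
	if states[h] != from {
		return fmt.Errorf("height %d is in state %d, want %d", h, states[h], from)
	}
	states[h] = to
	return nil
}

func main() {
	states := map[int64]state{5: stateNew}
	fmt.Println(transition(states, 5, stateNew, statePending))      // <nil>
	fmt.Println(transition(states, 5, statePending, stateReceived)) // <nil>
	fmt.Println(transition(states, 5, stateNew, stateProcessed))    // error: wrong predecessor
}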
func (sc *scheduler) allBlocksProcessed() bool {
	return sc.height >= sc.maxHeight()
}

// returns the max peer height or the last processed block, i.e. sc.height
func (sc *scheduler) maxHeight() int64 {
	max := sc.height - 1
	for _, peer := range sc.peers {
		if peer.state != peerStateReady {
			continue
		}
		if peer.height > max {
			max = peer.height
		}
	}
	return max
}

// lowest block in sc.blockStates with state == blockStateNew, or -1 if there are no new blocks
func (sc *scheduler) nextHeightToSchedule() int64 {
	var min int64 = math.MaxInt64
	for height, state := range sc.blockStates {
		if state == blockStateNew && height < min {
			min = height
		}
	}
	if min == math.MaxInt64 {
		min = -1
	}
	return min
}

func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 {
	var heights []int64
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			heights = append(heights, height)
		}
	}
	return heights
}

func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) {
	peers := sc.getPeersAtHeightOrAbove(height)
	if len(peers) == 0 {
		return "", fmt.Errorf("cannot find peer for height %d", height)
	}

	// create a map from number of pending requests to a list
	// of peers having that number of pending requests.
	pendingFrom := make(map[int][]p2p.ID)
	for _, peerID := range peers {
		numPending := len(sc.pendingFrom(peerID))
		pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
	}

	// find the set of peers with the minimum number of pending requests.
	minPending := math.MaxInt64
	for mp := range pendingFrom {
		if mp < minPending {
			minPending = mp
		}
	}

	sort.Sort(PeerByID(pendingFrom[minPending]))
	return pendingFrom[minPending][0], nil
}

// PeerByID is a list of peers sorted by peerID.
type PeerByID []p2p.ID

func (peers PeerByID) Len() int {
	return len(peers)
}

func (peers PeerByID) Less(i, j int) bool {
	return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1
}

func (peers PeerByID) Swap(i, j int) {
	peers[i], peers[j] = peers[j], peers[i]
}

// Handlers

// This handler gets the block, performs some validation and then passes it on to the processor.
func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
	err := sc.touchPeer(event.peerID, event.time)
	if err != nil {
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}

	err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
	if err != nil {
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}

	return scBlockReceived{peerID: event.peerID, block: event.block}, nil
}

func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) {
	if event.height != sc.height {
		panic(fmt.Sprintf("processed height %d but expected height %d", event.height, sc.height))
	}
	err := sc.markProcessed(event.height)
	if err != nil {
		// It is possible that a peer error or timeout is handled after the processor
		// has processed the block but before the scheduler received this event, so
		// when the pcBlockProcessed event is received the block may have been requested again.
		return scSchedulerFail{reason: err}, nil
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{}, nil
	}

	return noOp, nil
}

// Handles an error from the processor. The processor has already cleaned the blocks from
// the peers included in this event. Just attempt to remove the peers.
func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) {
	if len(sc.peers) == 0 {
		return noOp, nil
	}
	// The peers may have just been removed due to errors, low speed or timeouts.
	_ = sc.removePeer(event.firstPeerID)
	if event.firstPeerID != event.secondPeerID {
		_ = sc.removePeer(event.secondPeerID)
	}

	if sc.allBlocksProcessed() {
		return scFinishedEv{}, nil
	}

	return noOp, nil
}

func (sc *scheduler) handleAddNewPeer(event addNewPeer) (Event, error) {
	err := sc.addPeer(event.peerID)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}
	return noOp, nil
}

// XXX: unify types with peerError
func (sc *scheduler) handlePeerError(event peerError) (Event, error) {
	err := sc.removePeer(event.peerID)
	if err != nil {
		// XXX: It is possible that removePeer fails here for legitimate reasons,
		// for example if a peer timeout or error was handled just before this.
		return scSchedulerFail{reason: err}, nil
	}
	if sc.allBlocksProcessed() {
		return scFinishedEv{}, nil
	}
	return noOp, nil
}

func (sc *scheduler) handleTryPrunePeer(event tryPrunePeer) (Event, error) {
	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
	if len(prunablePeers) == 0 {
		return noOp, nil
	}
	for _, peerID := range prunablePeers {
		err := sc.removePeer(peerID)
		if err != nil {
			// Should never happen as prunablePeers() returns only existing peers in Ready state.
			panic("scheduler data corruption")
		}
	}

	// If all blocks are processed we should finish, even if some peers were pruned.
	if sc.allBlocksProcessed() {
		return scFinishedEv{}, nil
	}

	return scPeersPruned{peers: prunablePeers}, nil
}

// TODO - Schedule multiple block requests
func (sc *scheduler) handleTrySchedule(event trySchedule) (Event, error) {
	nextHeight := sc.nextHeightToSchedule()
	if nextHeight == -1 {
		return noOp, nil
	}

	bestPeerID, err := sc.selectPeer(nextHeight)
	if err != nil {
		return scSchedulerFail{reason: err}, nil
	}
	if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil {
		return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate
	}
	return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil
}

func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) {
	err := sc.setPeerHeight(event.peerID, event.height)
	if err != nil {
		return scPeerError{peerID: event.peerID, reason: err}, nil
	}
	return noOp, nil
}

func (sc *scheduler) handle(event Event) (Event, error) {
	switch event := event.(type) {
	case bcStatusResponse:
		return sc.handleStatusResponse(event)
	case bcBlockResponse:
		return sc.handleBlockResponse(event)
	case trySchedule:
		return sc.handleTrySchedule(event)
	case addNewPeer:
		return sc.handleAddNewPeer(event)
	case tryPrunePeer:
		return sc.handleTryPrunePeer(event)
	case peerError:
		return sc.handlePeerError(event)
	case pcBlockProcessed:
		return sc.handleBlockProcessed(event)
	case pcBlockVerificationFailure:
		return sc.handleBlockProcessError(event)
	default:
		return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil
	}
}
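Taken together, the handlers form a request/response loop: a status response raises a peer's height and seeds New heights, trySchedule picks the lowest New height and the least-loaded peer, and the block response marks that height Received. A test-style sketch of one pass through that loop against the unexported API above (it would have to live in the same package, like the tests do; heights, sizes and IDs are illustrative):

func ExampleSchedulerRoundTrip() {
	sc := newScheduler(0) // last processed height 0, so height 1 is next
	sc.targetPending = 4  // allow a few blocks in flight
	_ = sc.addPeer("P1")
	_ = sc.setPeerHeight("P1", 3) // P1 becomes Ready at height 3; heights 1-3 turn New

	now := time.Now()
	ev, _ := sc.handleTrySchedule(trySchedule{time: now})
	fmt.Printf("%T\n", ev) // v2.scBlockRequest: height 1 requested from P1

	if err := sc.markReceived("P1", 1, 512, now.Add(time.Second)); err != nil {
		fmt.Println(err)
	}
	fmt.Println(sc.getStateAtHeight(1)) // Received
}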
blockchain/v2/scheduler_test.go (new file, 2293 lines)
File diff suppressed because it is too large.
blockchain/v2/types.go (new file, 64 lines)
@@ -0,0 +1,64 @@
package v2

import (
	"github.com/Workiva/go-datastructures/queue"
)

type Event queue.Item

type priority interface {
	Compare(other queue.Item) int
	Priority() int
}

type priorityLow struct{}
type priorityNormal struct{}
type priorityHigh struct{}

func (p priorityLow) Priority() int {
	return 1
}

func (p priorityNormal) Priority() int {
	return 2
}

func (p priorityHigh) Priority() int {
	return 3
}

func (p priorityLow) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityNormal) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityHigh) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

type noOpEvent struct {
	priorityLow
}

var noOp = noOpEvent{}
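Events embed one of the three priority markers, so any two events can be ordered by comparing the embedded markers. A small illustration of the Compare contract (assuming it sits in a test file in the same package, with fmt imported):

func ExamplePriorityCompare() {
	fmt.Println(priorityHigh{}.Compare(priorityLow{}))      // 1: high sorts above low
	fmt.Println(priorityNormal{}.Compare(priorityNormal{})) // 0: equal priorities tie
	fmt.Println(priorityLow{}.Compare(priorityHigh{}))      // -1: low sorts below high
}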
cmd/contract_tests/main.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package main

import (
	"fmt"
	"strings"

	"github.com/snikch/goodman/hooks"
	"github.com/snikch/goodman/transaction"
)

func main() {
	// This must be compiled beforehand and given to dredd as a parameter; in the meantime the server should be running
	h := hooks.NewHooks()
	server := hooks.NewServer(hooks.NewHooksRunner(h))
	h.BeforeAll(func(t []*transaction.Transaction) {
		fmt.Println(t[0].Name)
	})
	h.BeforeEach(func(t *transaction.Transaction) {
		if strings.HasPrefix(t.Name, "Tx") ||
			// We need a proper example of evidence to broadcast
			strings.HasPrefix(t.Name, "Info > /broadcast_evidence") ||
			// We need a proper example of path and data
			strings.HasPrefix(t.Name, "ABCI > /abci_query") ||
			// We need to find a way to make a transaction before starting the tests;
			// that hash should replace the dummy one in the swagger file
			strings.HasPrefix(t.Name, "Info > /tx") {
			t.Skip = true
			fmt.Printf("%s has been skipped\n", t.Name)
		}
	})
	server.Serve()
	defer server.Listener.Close()
	fmt.Print("FINE")
}
@@ -6,8 +6,9 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/crypto/ed25519"
-	cmn "github.com/tendermint/tendermint/libs/common"
 	"github.com/tendermint/tendermint/libs/log"
+	tmnet "github.com/tendermint/tendermint/libs/net"
+	tmos "github.com/tendermint/tendermint/libs/os"
 
 	"github.com/tendermint/tendermint/privval"
 )
@@ -35,8 +36,8 @@ func main() {
 
 	pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)
 
-	var dialer privval.Dialer
-	protocol, address := cmn.ProtocolAndAddress(*addr)
+	var dialer privval.SocketDialer
+	protocol, address := tmnet.ProtocolAndAddress(*addr)
 	switch protocol {
 	case "unix":
 		dialer = privval.DialUnixFn(address)
@@ -45,19 +46,25 @@ func main() {
 		dialer = privval.DialTCPFn(address, connTimeout, ed25519.GenPrivKey())
 	default:
 		logger.Error("Unknown protocol", "protocol", protocol)
-		return
+		os.Exit(1)
 	}
 
-	rs := privval.NewRemoteSigner(logger, *chainID, pv, dialer)
-	err := rs.Start()
+	sd := privval.NewSignerDialerEndpoint(logger, dialer)
+	ss := privval.NewSignerServer(sd, *chainID, pv)
+
+	err := ss.Start()
 	if err != nil {
 		panic(err)
 	}
 
-	cmn.TrapSignal(func() {
-		err := rs.Stop()
+	// Stop upon receiving SIGTERM or CTRL-C.
+	tmos.TrapSignal(logger, func() {
+		err := ss.Stop()
 		if err != nil {
 			panic(err)
 		}
 	})
+
+	// Run forever.
+	select {}
 }
Some files were not shown because too many files have changed in this diff.