Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-14 08:42:48 +00:00
Compare commits: `v0.34.1-de...main` (443 commits)
*(Commit list: the compare view rendered the commits as bare abbreviated SHAs, from `9d1be86031` down to `e0f686ccac`; the Author and Date columns were empty.)*
**`.circleci/config.yml`** (new file, +168)

@@ -0,0 +1,168 @@

```yaml
version: 2.1

executors:
  golang:
    docker:
      - image: tendermintdev/docker-tendermint-build
    working_directory: /go/src/github.com/tendermint/tendermint
    environment:
      GOBIN: /tmp/bin
  release:
    machine: true
  docs:
    docker:
      - image: tendermintdev/docker-website-deployment
    environment:
      AWS_REGION: us-east-1

commands:
  run_test:
    parameters:
      script_path:
        type: string
    steps:
      - attach_workspace:
          at: /tmp/bin
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - restore_cache:
          name: "Restore go modules cache"
          keys:
            - go-mod-v1-{{ checksum "go.sum" }}
      - run:
          name: "Running test"
          command: |
            bash << parameters.script_path >>

jobs:
  setup_dependencies:
    executor: golang
    steps:
      - checkout
      - restore_cache:
          name: "Restore go modules cache"
          keys:
            - go-mod-v1-{{ checksum "go.sum" }}
      - run:
          command: |
            mkdir -p /tmp/bin
      - run:
          name: Cache go modules
          command: make go-mod-cache
      - run:
          name: tools
          command: make tools
      - run:
          name: "Build binaries"
          command: make install install_abci
      - save_cache:
          name: "Save go modules cache"
          key: go-mod-v1-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"
      - save_cache:
          name: "Save source code cache"
          key: go-src-v1-{{ .Revision }}
          paths:
            - ".git"
      - persist_to_workspace:
          root: "/tmp/bin"
          paths:
            - "."

  deploy_docs:
    executor: docs
    steps:
      - checkout
      - run:
          name: "Pull versions"
          command: git fetch origin v0.32 v0.33
      - run:
          name: "Build docs"
          command: make build-docs
      - run:
          name: "Sync to S3"
          command: make sync-docs

  prepare_build:
    executor: golang
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - run:
          name: Get next release number
          command: |
            export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
            echo "Last tag: ${LAST_TAG}"
            if [ -z "${LAST_TAG}" ]; then
              export LAST_TAG="${CIRCLE_BRANCH}"
              echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
            fi
            export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
            echo "Next tag: ${NEXT_TAG}"
            echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
      - run:
          name: Build dependencies
          command: make tools
      - persist_to_workspace:
          root: .
          paths:
            - "release-version.source"
      - save_cache:
          key: v2-release-deps-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"

  # # Test RPC implementation against the swagger documented specs
  # contract_tests:
  #   working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
  #   machine:
  #     image: circleci/classic:latest
  #   environment:
  #     GOBIN: /home/circleci/.go_workspace/bin
  #     GOPATH: /home/circleci/.go_workspace/
  #     GOOS: linux
  #     GOARCH: amd64
  #   parallelism: 1
  #   steps:
  #     - checkout
  #     - run:
  #         name: Test RPC endpoints against swagger documentation
  #         command: |
  #           set -x
  #           export PATH=~/.local/bin:$PATH
  #           # install node and dredd
  #           ./scripts/get_nodejs.sh
  #           # build the binaries with a proper version of Go
  #           docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks
  #           # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use
  #           go get github.com/snikch/goodman/cmd/goodman
  #           make contract-tests

workflows:
  version: 2
  docs:
    jobs:
      - deploy_docs:
          context: tendermint-docs
          filters:
            branches:
              only:
                - master
            tags:
              only:
                - /^v.*/
      - deploy_docs:
          context: tendermint-docs-staging
          filters:
            branches:
              only:
                - docs-staging
      # - contract_tests:
      #     requires:
      #       - setup_dependencies
```
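The `prepare_build` job's tag discovery hinges on `git describe` with a branch-scoped match pattern. A minimal sketch of that step in isolation, assuming a hypothetical release branch `v0.34` (the tag shown in the comment is illustrative):

```sh
# CIRCLE_BRANCH is normally set by CircleCI; hardcoded here for illustration.
CIRCLE_BRANCH=v0.34

# Print the most recent tag whose name starts with the branch name
# (e.g. "v0.34.23"); empty output means no release tag exists yet,
# which is the fallback case the script above handles.
git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"
```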
**`.github/CODEOWNERS`** (vendored, 24 lines changed; old and new lines appear adjacently as captured)

@@ -1,27 +1,11 @@

```
# CODEOWNERS: https://help.github.com/articles/about-codeowners/

# Everything goes through the following "global owners" by default.
# Everything goes through the following "global owners" by default.
# Unless a later match takes precedence, these three will be
# requested for review when someone opens a PR.
# requested for review when someone opens a PR.
# Note that the last matching pattern takes precedence, so
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @ebuchman @erikgrinaker @melekes @tessr

# Overrides for tooling packages
.circleci/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
.github/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
DOCKER/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr

# Overrides for core Tendermint packages
abci/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
evidence/ @cmwaters @ebuchman @erikgrinaker @melekes @tessr
light/ @cmwaters @melekes @ebuchman

# Overrides for docs
*.md @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
docs/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr

* @ebuchman @tendermint/tendermint-engineering @adizere @lasarojc
```
**`.github/ISSUE_TEMPLATE/proposal.md`** (vendored, new file, +37)

@@ -0,0 +1,37 @@

```markdown
---
name: Protocol Change Proposal
about: Create a proposal to request a change to the protocol

---

<!-- < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < < ☺
v ✰ Thanks for opening an issue! ✰
v Before smashing the submit button please review the template.
v Word of caution: Under-specified proposals may be rejected summarily
☺ > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > > -->

# Protocol Change Proposal

## Summary

<!-- Short, concise description of the proposed change -->

## Problem Definition

<!-- Why do we need this change?
What problems may be addressed by introducing this change?
What benefits does Tendermint stand to gain by including this change?
Are there any disadvantages of including this change? -->

## Proposal

<!-- Detailed description of requirements of implementation -->

____

#### For Admin Use

- [ ] Not duplicate issue
- [ ] Appropriate labels applied
- [ ] Appropriate contributors tagged
- [ ] Contributor assigned/self-assigned
```
**`.github/dependabot.yml`** (vendored, deleted, -28)

@@ -1,28 +0,0 @@

```yaml
version: 2
updates:
  - package-ecosystem: github-actions
    directory: "/"
    schedule:
      interval: daily
      time: "11:00"
    open-pull-requests-limit: 10
  - package-ecosystem: npm
    directory: "/docs"
    schedule:
      interval: daily
      time: "11:00"
    open-pull-requests-limit: 10
    reviewers:
      - fadeev
  - package-ecosystem: gomod
    directory: "/"
    schedule:
      interval: daily
      time: "11:00"
    open-pull-requests-limit: 10
    reviewers:
      - melekes
      - tessr
      - erikgrinaker
    labels:
      - T:dependencies
```
**`.github/linter/markdownlint.yml`** (vendored, deleted, -8)

@@ -1,8 +0,0 @@

```yaml
default: true,
MD007: { "indent": 4 }
MD013: false
MD024: { siblings_only: true }
MD025: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false
```
**`.github/mergify.yml`** (vendored, 21 lines changed; old and new lines appear adjacently as captured)

@@ -1,10 +1,19 @@

```
pull_request_rules:
  - name: Automerge to master
queue_rules:
  - name: default
    conditions:
      - base=master
      - base=v0.34.x
      - label=S:automerge

pull_request_rules:
  - name: Automerge to v0.34.x
    conditions:
      - base=v0.34.x
      - label=S:automerge
    actions:
      merge:
      queue:
        method: squash
        strict: true
        commit_message: title+body
        name: default
        commit_message_template: |
          {{ title }} (#{{ number }})

          {{ body }}
```
**`.github/workflows/check-generated.yml`** (vendored, new file, +75)

@@ -0,0 +1,75 @@

```yaml
# Verify that generated code is up-to-date.
#
# Note that we run these checks regardless whether the input files have
# changed, because generated code can change in response to toolchain updates
# even if no files in the repository are modified.
name: Check generated code
on:
  pull_request:
    branches:
      - main

permissions:
  contents: read

jobs:
  check-mocks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3

      - name: "Check generated mocks"
        run: |
          set -euo pipefail

          readonly MOCKERY=2.12.3 # N.B. no leading "v"
          curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" | tar -C /usr/local/bin -xzf -
          make mockery 2>/dev/null

          if ! git diff --stat --exit-code ; then
            echo ">> ERROR:"
            echo ">>"
            echo ">> Generated mocks require update (either Mockery or source files may have changed)."
            echo ">> Ensure your tools are up-to-date, re-run 'make mockery' and update this PR."
            echo ">>"
            exit 1
          fi

  check-proto:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3
        with:
          fetch-depth: 1 # we need a .git directory to run git diff

      - name: "Check protobuf generated code"
        run: |
          set -euo pipefail

          # Install buf and gogo tools, so that differences that arise from
          # toolchain differences are also caught.
          readonly tools="$(mktemp -d)"
          export PATH="${PATH}:${tools}/bin"
          export GOBIN="${tools}/bin"

          go install github.com/bufbuild/buf/cmd/buf
          go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest

          make proto-gen

          if ! git diff --stat --exit-code ; then
            echo ">> ERROR:"
            echo ">>"
            echo ">> Protobuf generated code requires update (either tools or .proto files may have changed)."
            echo ">> Ensure your tools are up-to-date, re-run 'make proto-gen' and update this PR."
            echo ">>"
            exit 1
          fi
```
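The mocks check above can be reproduced locally. A sketch, assuming a Linux x86_64 host with write access to `/usr/local/bin`; the Mockery version and download URL are copied from the workflow:

```sh
MOCKERY=2.12.3   # pinned release; note there is no leading "v" in the tarball name
curl -sL "https://github.com/vektra/mockery/releases/download/v${MOCKERY}/mockery_${MOCKERY}_Linux_x86_64.tar.gz" \
  | tar -C /usr/local/bin -xzf -

make mockery                   # regenerate all mocks in place
git diff --stat --exit-code    # a non-zero exit means the committed mocks are stale
```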
**`.github/workflows/coverage.yml`** (vendored, 48 lines changed; old and new lines appear adjacently as captured)

@@ -10,25 +10,25 @@ jobs:

```
  split-test-files:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/checkout@v3
      - name: Create a file with all the pkgs
        run: go list ./... > pkgs.txt
      - name: Split pkgs into 4 files
        run: split -d -n l/4 pkgs.txt pkgs.txt.part.
      # cache multiple
      - uses: actions/upload-artifact@v2
      - uses: actions/upload-artifact@v3
        with:
          name: "${{ github.sha }}-00"
          path: ./pkgs.txt.part.00
      - uses: actions/upload-artifact@v2
      - uses: actions/upload-artifact@v3
        with:
          name: "${{ github.sha }}-01"
          path: ./pkgs.txt.part.01
      - uses: actions/upload-artifact@v2
      - uses: actions/upload-artifact@v3
        with:
          name: "${{ github.sha }}-02"
          path: ./pkgs.txt.part.02
      - uses: actions/upload-artifact@v2
      - uses: actions/upload-artifact@v3
        with:
          name: "${{ github.sha }}-03"
          path: ./pkgs.txt.part.03
```

@@ -42,11 +42,11 @@

```
      goarch: ["arm", "amd64"]
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
```

@@ -64,29 +64,25 @@

```
    matrix:
      part: ["00", "01", "02", "03"]
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/download-artifact@v2
      - uses: actions/download-artifact@v3
        with:
          name: "${{ github.sha }}-${{ matrix.part }}"
        if: env.GIT_DIFF
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15
      - name: test & coverage report creation
        run: |
          cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
        if: env.GIT_DIFF
      - uses: actions/upload-artifact@v2
      - uses: actions/upload-artifact@v3
        with:
          name: "${{ github.sha }}-${{ matrix.part }}-coverage"
          path: ./${{ matrix.part }}profile.out
```

@@ -95,33 +91,33 @@

```
    runs-on: ubuntu-latest
    needs: tests
    steps:
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/download-artifact@v2
      - uses: actions/download-artifact@v3
        with:
          name: "${{ github.sha }}-00-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
      - uses: actions/download-artifact@v3
        with:
          name: "${{ github.sha }}-01-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
      - uses: actions/download-artifact@v3
        with:
          name: "${{ github.sha }}-02-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
      - uses: actions/download-artifact@v3
        with:
          name: "${{ github.sha }}-03-coverage"
        if: env.GIT_DIFF
      - run: |
          cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
        if: env.GIT_DIFF
      - uses: codecov/codecov-action@v1.0.15
      - uses: codecov/codecov-action@v3
        with:
          file: ./coverage.txt
        if: env.GIT_DIFF
```
**`.github/workflows/docker.yml`** (vendored, 24 lines changed; old and new lines appear adjacently as captured)

@@ -14,10 +14,7 @@ jobs:

```
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@master
      - uses: actions/checkout@v3
      - name: Prepare
        id: prep
        run: |
```

@@ -37,23 +34,26 @@

```
          fi
          echo ::set-output name=tags::${TAGS}

      - name: Set up QEMU
        uses: docker/setup-qemu-action@master
        with:
          platforms: all

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        uses: docker/setup-buildx-action@v2

      - name: Login to DockerHub
        uses: docker/login-action@v1
        if: ${{ github.event_name != 'pull_request' }}
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build Tendermint
        run: |
          make build-linux && cp build/tendermint DOCKER/tendermint

      - name: Publish to Docker Hub
        uses: docker/build-push-action@v2
        uses: docker/build-push-action@v3
        with:
          context: ./DOCKER
          context: .
          file: ./DOCKER/Dockerfile
          platforms: linux/amd64,linux/arm64
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.prep.outputs.tags }}
```
**`.github/workflows/docs.yml`** (vendored, deleted, -31)

@@ -1,31 +0,0 @@

```yaml
name: Documentation
# This job builds and deploys documenation to github pages.
# It runs on every push to master.
on:
  push:
    branches:
      - master

jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    container:
      image: tendermintdev/docker-website-deployment
    steps:
      - name: Checkout 🛎️
        uses: actions/checkout@v2.3.1
        with:
          persist-credentials: false
          fetch-depth: 0

      - name: Install and Build 🔧
        run: |
          apk add rsync
          make build-docs

      - name: Deploy 🚀
        uses: JamesIves/github-pages-deploy-action@3.7.1
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          BRANCH: gh-pages
          FOLDER: ~/output
```
**`.github/workflows/e2e-manual.yml`** (vendored, new file, +35)

@@ -0,0 +1,35 @@

```yaml
# Manually run randomly generated E2E testnets (as nightly).
name: e2e-manual
on:
  workflow_dispatch:

jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner tests

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 4 -d networks/nightly/

      - name: Run ${{ matrix.p2p }} p2p testnets
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
```
**`.github/workflows/e2e-nightly-34x.yml`** (vendored, new file, +76)

@@ -0,0 +1,76 @@

```yaml
# Runs randomly generated E2E testnets nightly
# on the 0.34.x release branch

# !! If you change something in this file, you probably want
# to update the e2e-nightly-master workflow as well!

name: e2e-nightly-34x
on:
  workflow_dispatch: # allow running workflow manually, in theory
  schedule:
    - cron: '0 2 * * *'

jobs:
  e2e-nightly-test:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
      fail-fast: false
      matrix:
        group: ['00', '01', '02', '03']
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3
        with:
          ref: 'v0.34.x'

      - name: Build
        working-directory: test/e2e
        # Run make jobs in parallel, since we can't run steps in parallel.
        run: make -j2 docker generator runner

      - name: Generate testnets
        working-directory: test/e2e
        # When changing -g, also change the matrix groups above
        run: ./build/generator -g 4 -d networks/nightly

      - name: Run testnets in group ${{ matrix.group }}
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

  e2e-nightly-fail:
    needs: e2e-nightly-test
    if: ${{ failure() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':skull:'
          SLACK_COLOR: danger
          SLACK_MESSAGE: Nightly E2E tests failed on v0.34.x
          SLACK_FOOTER: ''

  e2e-nightly-success: # may turn this off once they seem to pass consistently
    needs: e2e-nightly-test
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':white_check_mark:'
          SLACK_COLOR: good
          SLACK_MESSAGE: Nightly E2E tests passed on v0.34.x
          SLACK_FOOTER: ''
```
@@ -1,12 +1,16 @@

```
# Runs randomly generated E2E testnets nightly.
name: e2e-nightly
# Runs randomly generated E2E testnets nightly on master

# !! If you change something in this file, you probably want
# to update the e2e-nightly-34x workflow as well!

name: e2e-nightly-master
on:
  workflow_dispatch: # allow running workflow manually
  schedule:
    - cron: '0 2 * * *'

jobs:
  e2e-nightly-test:
  e2e-nightly-test-2:
    # Run parallel jobs for the listed testnet groups (must match the
    # ./build/generator -g flag)
    strategy:
```

@@ -16,11 +20,11 @@ jobs:

```
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: '1.15'
          go-version: '1.18'

      - uses: actions/checkout@v2
      - uses: actions/checkout@v3

      - name: Build
        working-directory: test/e2e
```

@@ -36,18 +40,34 @@

```
        working-directory: test/e2e
        run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml

  e2e-nightly-fail:
    needs: e2e-nightly-test
  e2e-nightly-fail-2:
    needs: e2e-nightly-test-2
    if: ${{ failure() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on failure
        uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':skull:'
          SLACK_COLOR: danger
          SLACK_MESSAGE: Nightly E2E tests failed
          SLACK_MESSAGE: Nightly E2E tests failed on master
          SLACK_FOOTER: ''

  e2e-nightly-success: # may turn this off once they seem to pass consistently
    needs: e2e-nightly-test-2
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack on success
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly E2E Tests
          SLACK_ICON_EMOJI: ':white_check_mark:'
          SLACK_COLOR: good
          SLACK_MESSAGE: Nightly E2E tests passed on master
          SLACK_FOOTER: ''
```
**`.github/workflows/e2e.yml`** (vendored, 8 lines changed; old and new lines appear adjacently as captured)

@@ -13,11 +13,11 @@ jobs:

```
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: '1.15'
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
          go-version: '1.18'
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
```
**`.github/workflows/fuzz-nightly.yml`** (vendored, new file, +83)

@@ -0,0 +1,83 @@

```yaml
# Runs fuzzing nightly.
name: fuzz-nightly
on:
  workflow_dispatch: # allow running workflow manually
  schedule:
    - cron: '0 3 * * *'

jobs:
  fuzz-nightly-test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - uses: actions/checkout@v3

      - name: Install go-fuzz
        working-directory: test/fuzz
        run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build

      - name: Fuzz mempool
        working-directory: test/fuzz
        run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
        continue-on-error: true

      - name: Fuzz p2p-addrbook
        working-directory: test/fuzz
        run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook
        continue-on-error: true

      - name: Fuzz p2p-pex
        working-directory: test/fuzz
        run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex
        continue-on-error: true

      - name: Fuzz p2p-sc
        working-directory: test/fuzz
        run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-sc
        continue-on-error: true

      - name: Fuzz p2p-rpc-server
        working-directory: test/fuzz
        run: timeout -s SIGINT --preserve-status 10m make fuzz-rpc-server
        continue-on-error: true

      - name: Archive crashers
        uses: actions/upload-artifact@v3
        with:
          name: crashers
          path: test/fuzz/**/crashers
          retention-days: 1

      - name: Archive suppressions
        uses: actions/upload-artifact@v3
        with:
          name: suppressions
          path: test/fuzz/**/suppressions
          retention-days: 1

      - name: Set crashers count
        working-directory: test/fuzz
        run: echo "::set-output name=count::$(find . -type d -name 'crashers' | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
        id: set-crashers-count

    outputs:
      crashers-count: ${{ steps.set-crashers-count.outputs.count }}

  fuzz-nightly-fail:
    needs: fuzz-nightly-test
    if: ${{ needs.fuzz-nightly-test.outputs.crashers-count != 0 }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack if any crashers
        uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
        env:
          SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
          SLACK_CHANNEL: tendermint-internal
          SLACK_USERNAME: Nightly Fuzz Tests
          SLACK_ICON_EMOJI: ':firecracker:'
          SLACK_COLOR: danger
          SLACK_MESSAGE: Crashers found in Nightly Fuzz tests
          SLACK_FOOTER: ''
```
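Each fuzz target can also be run locally under the same ten-minute bound the workflow applies; a sketch for the mempool target:

```sh
cd test/fuzz
go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build

# SIGINT lets go-fuzz shut down cleanly; findings accumulate under crashers/.
timeout -s SIGINT --preserve-status 10m make fuzz-mempool
find . -type d -name crashers    # inspect any crash inputs that were recorded
```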
**`.github/workflows/linkchecker.yml`** (vendored, deleted, -12)

@@ -1,12 +0,0 @@

```yaml
name: Check Markdown links
on:
  schedule:
    - cron: '* */24 * * *'
jobs:
  markdown-link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.8
        with:
          folder-path: "docs"
```
@@ -11,19 +11,24 @@ jobs:

```
  golangci:
    name: golangci-lint
    runs-on: ubuntu-latest
    timeout-minutes: 4
    timeout-minutes: 8
    steps:
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
      - uses: actions/checkout@v3
      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: golangci/golangci-lint-action@v2.3.0
      - uses: golangci/golangci-lint-action@v3
        with:
          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
          version: v1.31
          # Required: the version of golangci-lint is required and
          # must be specified without patch version: we always use the
          # latest patch version.
          version: v1.50.1
          args: --timeout 10m
          github-token: ${{ secrets.github_token }}
        if: env.GIT_DIFF
```
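The CI lint step should be reproducible locally, assuming golangci-lint v1.50.1 (the version pinned above) is on the PATH:

```sh
# Same arguments the action passes; run from the repository root.
golangci-lint run --timeout 10m
```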
**`.github/workflows/linter.yml`** (vendored, 5 lines changed; old and new lines appear adjacently as captured)

@@ -11,6 +11,7 @@ on:

```
    branches: [master]
    paths:
      - "**.md"
      - "**.yml"

jobs:
  build:
```

@@ -18,7 +19,7 @@

```
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v2
        uses: actions/checkout@v3
      - name: Lint Code Base
        uses: docker://github/super-linter:v3
        env:
```

@@ -27,5 +28,5 @@

```
          DEFAULT_BRANCH: master
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          VALIDATE_MD: true
          VALIDATE_OPAENAPI: true
          VALIDATE_OPENAPI: true
          VALIDATE_YAML: true
```
**`.github/workflows/pre-release.yml`** (vendored, new file, +65)

@@ -0,0 +1,65 @@

```yaml
name: "Pre-release"

on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+-alpha.[0-9]+" # e.g. v0.37.0-alpha.1, v0.38.0-alpha.10
      - "v[0-9]+.[0-9]+.[0-9]+-beta.[0-9]+" # e.g. v0.37.0-beta.1, v0.38.0-beta.10
      - "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+" # e.g. v0.37.0-rc1, v0.38.0-rc10

jobs:
  prerelease:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v3
        with:
          go-version: '1.18'

      - name: Build
        uses: goreleaser/goreleaser-action@v3
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest
          args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

      # Link to CHANGELOG_PENDING.md as release notes.
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG_PENDING.md > ../release_notes.md

      - name: Release
        uses: goreleaser/goreleaser-action@v3
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest
          args: release --rm-dist --release-notes=../release_notes.md
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  prerelease-success:
    needs: prerelease
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack upon pre-release
        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":sparkles: New Tendermint pre-release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
                  }
                }
              ]
            }
```
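Pushing any tag that matches one of the three patterns triggers this workflow; the tag below is hypothetical:

```sh
git tag v0.37.0-rc1              # matches "v[0-9]+.[0-9]+.[0-9]+-rc[0-9]+"
git push origin v0.37.0-rc1      # the push event starts the prerelease job
```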
**`.github/workflows/proto-lint.yml`** (vendored, new file, +21)

@@ -0,0 +1,21 @@

```yaml
name: Protobuf Lint
on:
  pull_request:
    paths:
      - 'proto/**'
  push:
    branches:
      - v0.34.x
    paths:
      - 'proto/**'

jobs:
  lint:
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/checkout@v3
      - uses: bufbuild/buf-setup-action@v1.8.0
      - uses: bufbuild/buf-lint-action@v1
        with:
          input: 'proto'
```
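The local equivalent of this check, assuming a buf CLI at or near the pinned v1.8.0, is a single command; the `proto` argument mirrors the action's `input`:

```sh
buf lint proto
```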
**`.github/workflows/proto.yml`** (vendored, deleted, -22)

@@ -1,22 +0,0 @@

```yaml
name: Protobuf
# Protobuf runs buf (https://buf.build/) lint and check-breakage
# This workflow is only run when a .proto file has been modified
on:
  pull_request:
    paths:
      - "**.proto"
jobs:
  proto-lint:
    runs-on: ubuntu-latest
    timeout-minutes: 4
    steps:
      - uses: actions/checkout@master
      - name: lint
        run: make proto-lint
  proto-breakage:
    runs-on: ubuntu-latest
    timeout-minutes: 4
    steps:
      - uses: actions/checkout@master
      - name: check-breakage
        run: make proto-check-breaking-ci
```
**`.github/workflows/release.yml`** (vendored, 49 lines changed; old and new lines appear adjacently as captured)

@@ -3,27 +3,60 @@ name: "Release"

```
on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
      - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

jobs:
  goreleaser:
  release:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        uses: actions/checkout@v3
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: '1.15'
          go-version: '1.18'

      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
      - name: Build
        uses: goreleaser/goreleaser-action@v3
        if: ${{ github.event_name == 'pull_request' }}
        with:
          version: latest
          args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

      - name: Release
        uses: goreleaser/goreleaser-action@v3
        if: startsWith(github.ref, 'refs/tags/')
        with:
          version: latest
          args: release --rm-dist --release-notes=../release_notes.md
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  release-success:
    needs: release
    if: ${{ success() }}
    runs-on: ubuntu-latest
    steps:
      - name: Notify Slack upon release
        uses: slackapi/slack-github-action@v1.23.0
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
          SLACK_WEBHOOK_TYPE: INCOMING_WEBHOOK
          RELEASE_URL: "${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ github.ref_name }}"
        with:
          payload: |
            {
              "blocks": [
                {
                  "type": "section",
                  "text": {
                    "type": "mrkdwn",
                    "text": ":rocket: New Tendermint release: <${{ env.RELEASE_URL }}|${{ github.ref_name }}>"
                  }
                }
              ]
            }
```
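The release-notes step relies on shell prefix stripping of `GITHUB_REF`; a quick sketch with a hypothetical tag shows the expansion:

```sh
GITHUB_REF=refs/tags/v0.34.24      # hypothetical; set by GitHub Actions on tag pushes
echo "${GITHUB_REF#refs/tags/}"    # prints "v0.34.24": the refs/tags/ prefix is removed
```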
**`.github/workflows/stale.yml`** (vendored, 2 lines changed; old and new lines appear adjacently as captured)

@@ -7,7 +7,7 @@ jobs:

```
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v3
      - uses: actions/stale@v6
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: "This pull request has been automatically marked as stale because it has not had
```
**`.github/workflows/tests.yml`** (vendored, 48 lines changed; old and new lines appear adjacently as captured)

@@ -23,11 +23,11 @@ jobs:

```
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
```

@@ -36,7 +36,7 @@

```
      - name: install
        run: make install install_abci
        if: "env.GIT_DIFF != ''"
      - uses: actions/cache@v2.1.3
      - uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
```

@@ -44,7 +44,7 @@

```
            ${{ runner.os }}-go-
        if: env.GIT_DIFF
      # Cache binaries for use by other jobs
      - uses: actions/cache@v2.1.3
      - uses: actions/cache@v3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
```

@@ -55,24 +55,24 @@

```
    needs: build
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
          go-version: "^1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/cache@v2.1.3
      - uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
        if: env.GIT_DIFF
      - uses: actions/cache@v2.1.3
      - uses: actions/cache@v3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
```

@@ -87,24 +87,24 @@

```
    needs: build
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
          go-version: "^1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/cache@v2.1.3
      - uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
        if: env.GIT_DIFF
      - uses: actions/cache@v2.1.3
      - uses: actions/cache@v3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
```

@@ -118,24 +118,24 @@

```
    needs: build
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
      - uses: actions/setup-go@v3
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
          go-version: "1.18"
      - uses: actions/checkout@v3
      - uses: technote-space/get-diff-action@v6
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/cache@v2.1.3
      - uses: actions/cache@v3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
        if: env.GIT_DIFF
      - uses: actions/cache@v2.1.3
      - uses: actions/cache@v3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
```
**`.gitignore`** (vendored, 76 lines changed; old and new lines appear adjacently as captured)

@@ -1,43 +1,69 @@

```
*.bak
*.iml
*.log
*.swo
*.swp
*/.glide
*/vendor
.DS_Store
*.swo
.bak
.idea/
.revision
.tendermint
.tendermint-lite
.terraform
.vagrant
.vendor-new/
.vscode/
abci-cli
addrbook.json
artifacts/*
*.bak
.DS_Store
build/*
rpc/test/.tendermint
.tendermint
remote_dump
.revision
vendor
.vagrant
test/e2e/build
test/maverick/maverick
test/e2e/networks/*/
test/p2p/data/
test/logs
coverage.txt
docs/.vuepress/dist
docs/_build
docs/dist
docs/.vuepress/dist
*.log
abci-cli
docs/node_modules/
docs/.vuepress/public/rpc
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out
remote_dump
rpc/test/.tendermint
scripts/cutWALUntil/cutWALUntil

scripts/wal2json/wal2json
scripts/cutWALUntil/cutWALUntil

.idea/
*.iml

.vscode/

libs/pubsub/query/fuzz_test/output
shunit2

.tendermint-lite
addrbook.json

*/vendor
.vendor-new/
*/.glide
.terraform
terraform.tfstate
terraform.tfstate.backup
terraform.tfstate.d
profile\.out
test/app/grpc_client
test/loadtime/build
test/e2e/build
test/e2e/networks/*/
test/logs
test/maverick/maverick
test/p2p/data/
vendor
vendor
test/fuzz/**/corpus
test/fuzz/**/crashers
test/fuzz/**/suppressions
test/fuzz/**/*.zip
*.aux
*.bbl
*.blg
*.pdf
*.gz
*.dvi
# Python virtual environments
.venv
```
@@ -1,61 +1,46 @@

```
linters:
  enable:
    - asciicheck
    - bodyclose
    - deadcode
    - depguard
    - dogsled
    - dupl
    - errcheck
    # - funlen
    # - gochecknoglobals
    # - gochecknoinits
    - exportloopref
    - goconst
    - gocritic
    # - gocyclo
    # - godox
    - gofmt
    - goimports
    - golint
    - revive
    - gosec
    - gosimple
    - govet
    - ineffassign
    # - interfacer
    - lll
    - misspell
    # - maligned
    - nakedret
    - nolintlint
    - prealloc
    - scopelint
    - staticcheck
    - structcheck
    # - structcheck // to be fixed by golangci-lint
    - stylecheck
    - typecheck
    - unconvert
    # - unparam
    - unused
    - varcheck
    # - whitespace
    # - wsl
    # - gocognit
    - nolintlint

issues:
  exclude-rules:
    - path: _test\.go
      linters:
        - gosec
    - linters:
        - lll
      source: "https://"
  max-same-issues: 50

linters-settings:
  dogsled:
    max-blank-identifiers: 3
  maligned:
    suggest-new: true
  # govet:
  #   check-shadowing: true
  golint:
    min-confidence: 0
  maligned:
    suggest-new: true

run:
  skip-files:
    - libs/pubsub/query/query.peg.go
```
@@ -1,11 +1,11 @@

```
project_name: Tendermint
project_name: tendermint

env:
  # Require use of Go modules.
  - GO111MODULE=on

builds:
  - id: "tendermint"
  - id: "Tendermint"
    main: ./cmd/tendermint/main.go
    ldflags:
      - -s -w -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Version }}
```

@@ -25,12 +25,13 @@ checksum:

```
  algorithm: sha256

release:
  name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"
  prerelease: auto
  name_template: "{{.Version}}"

archives:
  files:
    - LICENSE
    - README.md
    - UPGRADING.md
    - SECURITY.md
    - CHANGELOG.md
  - files:
      - LICENSE
      - README.md
      - UPGRADING.md
      - SECURITY.md
      - CHANGELOG.md
```
**`.markdownlint.yml`** (new file, +11)

@@ -0,0 +1,11 @@

```yaml
default: true
MD001: false
MD007: {indent: 4}
MD013: false
MD024: {siblings_only: true}
MD025: false
MD033: false
MD036: false
MD010: false
MD012: false
MD028: false
```
**`.mergify.yml`** (new file, +10)

@@ -0,0 +1,10 @@

```yaml
pull_request_rules:
  - name: Automerge to master
    conditions:
      - base=master
      - label=S:automerge
    actions:
      merge:
        method: squash
        strict: true
        commit_message: title+body
```
**`.vscode/settings.json`** (vendored, new file, +9)

@@ -0,0 +1,9 @@

```json
{
  "protoc": {
    "options": [
      "--proto_path=${workspaceRoot}/proto",
      "--proto_path=${workspaceRoot}/third_party/proto"
    ]
  }

}
```
575
CHANGELOG.md
575
CHANGELOG.md
@@ -1,5 +1,448 @@
|
||||
# Changelog
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/cosmos).
|
||||
|
||||
## v0.34.24
|
||||
|
||||
*Nov 21, 2022*
|
||||
|
||||
Apart from one minor bug fix, this release aims to optimize the output of the
|
||||
RPC (both HTTP and WebSocket endpoints). See our [upgrading
|
||||
guidelines](./UPGRADING.md#v03424) for more details.
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- `[rpc]` [\#9724](https://github.com/tendermint/tendermint/issues/9724) Remove
|
||||
useless whitespace in RPC output (@adizere, @thanethomson)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- `[rpc]` [\#9692](https://github.com/tendermint/tendermint/issues/9692) Remove
|
||||
`Cache-Control` header response from `/check_tx` endpoint (@JayT106)
|
||||
|
||||
## v0.34.23

*Nov 9, 2022*

This release introduces some new Prometheus metrics to help in determining what
kinds of messages are consuming the most P2P bandwidth. This builds towards our
broader goal of optimizing Tendermint bandwidth consumption, and will give us
meaningful insights once we can establish these metrics for a number of chains.

We now also return `Cache-Control` headers for select RPC endpoints to help
facilitate caching.

Special thanks to external contributors on this release: @JayT106

### IMPROVEMENTS

- `[p2p]` [\#9641](https://github.com/tendermint/tendermint/issues/9641) Add new
  Envelope type and associated methods for sending and receiving Envelopes
  instead of raw bytes. This also adds new metrics,
  `tendermint_p2p_message_send_bytes_total` and
  `tendermint_p2p_message_receive_bytes_total`, that expose how many bytes of
  each message type have been sent.
- `[rpc]` [\#9666](https://github.com/tendermint/tendermint/issues/9666) Enable
  caching of RPC responses (@JayT106)

The following RPC endpoints will return `Cache-Control` headers with a maximum
age of 1 day:

- `/abci_info`
- `/block`, if `height` is supplied
- `/block_by_hash`
- `/block_results`, if `height` is supplied
- `/blockchain`
- `/check_tx`
- `/commit`, if `height` is supplied
- `/consensus_params`, if `height` is supplied
- `/genesis`
- `/genesis_chunked`
- `/tx`
- `/validators`, if `height` is supplied
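To observe the new caching behavior, you can dump the response headers directly. A minimal sketch, assuming a local node with its RPC server listening on the default `localhost:26657`:

```sh
# GET a cacheable endpoint, print only the headers, and discard the body.
curl -sD - -o /dev/null "http://localhost:26657/abci_info" | grep -i cache-control

# Height-dependent endpoints only set Cache-Control when `height` is supplied.
curl -sD - -o /dev/null "http://localhost:26657/block?height=1" | grep -i cache-control
```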
## v0.34.22

This release includes several bug fixes, [one of
which](https://github.com/tendermint/tendermint/pull/9518) we discovered while
building up a baseline for v0.34 against which to compare our upcoming v0.37
release during our [QA process](./docs/qa/).

Special thanks to external contributors on this release: @RiccardoM

### FEATURES

- [rpc] [\#9423](https://github.com/tendermint/tendermint/pull/9423) Support
  HTTPS URLs from the WebSocket client (@RiccardoM, @cmwaters)

### BUG FIXES

- [config] [\#9483](https://github.com/tendermint/tendermint/issues/9483)
  Calling `tendermint init` would incorrectly leave out the new `[storage]`
  section delimiter in the generated configuration file; this has now been
  fixed
- [p2p] [\#9500](https://github.com/tendermint/tendermint/issues/9500) Prevent
  peers that have errored from being added to the peer set (@jmalicevic)
- [indexer] [\#9473](https://github.com/tendermint/tendermint/issues/9473) Fix
  bug that caused the psql indexer to index empty blocks whenever one of the
  transactions returned a non-zero code. The relevant deduplication logic has
  been moved within the kv indexer only (@cmwaters)
- [blocksync] [\#9518](https://github.com/tendermint/tendermint/issues/9518) A
  block sync stall was observed during our QA process whereby the node was
  unable to make progress. Retrying block requests after a timeout fixes this.
## v0.34.21

Release highlights include:

- A new `[storage]` configuration section and flag, `discard_abci_responses`,
  which, if enabled, discards all ABCI responses except the latest one in order
  to reduce disk space usage in the state store. When enabled, the
  `block_results` RPC endpoint can no longer function and will return an error.
- A new CLI command, `reindex-event`, to re-index block and tx events to the
  event sinks. You can run this command when the event store backend has been
  dropped/disconnected or when you want to replace the backend. When
  `discard_abci_responses` is enabled, you will not be able to use this command
  (a usage sketch follows this list).
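A rough usage sketch of the new command (the command name comes from the note above; `--home` is assumed here to be Tendermint's usual home-directory flag):

```sh
# Run against a stopped node. Re-indexing relies on stored ABCI responses,
# so it cannot be used when discard_abci_responses is enabled.
tendermint reindex-event --home "$HOME/.tendermint"
```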
Special thanks to external contributors on this release: @rootwarp & @amimart

### FEATURES

- [cli] [\#9083](https://github.com/tendermint/tendermint/issues/9083) Backport command to reindex missed events (@cmwaters)
- [cli] [\#9107](https://github.com/tendermint/tendermint/issues/9107) Add the `p2p.external-address` argument to set the node P2P external address (@amimart)

### IMPROVEMENTS

- [config] [\#9054](https://github.com/tendermint/tendermint/issues/9054) `discard_abci_responses` flag added to discard all ABCI
  responses except the last in order to save on storage space in the state
  store (@samricotta)

### BUG FIXES

- [mempool] [\#9033](https://github.com/tendermint/tendermint/issues/9033) Rework lock discipline to mitigate callback deadlocks in the
  priority mempool
- [cli] [\#9103](https://github.com/tendermint/tendermint/issues/9103) fix `unsafe-reset-all` to work with the home path (@rootwarp)
## v0.34.20

Special thanks to external contributors on this release: @joeabbey @yihuang

This release introduces a prioritized mempool. Further notes can be found in UPGRADING.md.

NOTE: There is a known issue when combining the prioritized mempool with the ABCI socket client, which the team is currently working to resolve. Read more about the issue [here](https://github.com/tendermint/tendermint/pull/9030).

### BUG FIXES

- [blocksync] [\#8496](https://github.com/tendermint/tendermint/pull/8496) validate block against state before persisting it to disk (@cmwaters)
- [indexer] [#8625](https://github.com/tendermint/tendermint/pull/8625) Fix overriding tx index of duplicated txs. (@yihuang)
- [mempool] [\#8962](https://github.com/tendermint/tendermint/issues/8962) Backport priority mempool fixes from v0.35.x to v0.34.x (@creachadair).

### FEATURES

- [cli] [\#8674] Add command to force compact goleveldb databases (@cmwaters)
- [mempool] [\#8695] Port back the priority mempool. (@alexanderbez, @jmalicevic, @cmwaters)

### IMPROVEMENTS

- [logging] [\#8845](https://github.com/tendermint/tendermint/issues/8845) Add "Lazy" Stringers to defer Sprintf and Hash until logs print. (@joeabbey)
## v0.34.19

### BUG FIXES

- [cli] [\#8270](https://github.com/tendermint/tendermint/issues/8270) fix reset commands (@alexanderbez).

## v0.34.18

### BREAKING CHANGES

- CLI/RPC/Config
  - [cli] [\#8258](https://github.com/tendermint/tendermint/pull/8258) Fix a bug in the cli that caused `unsafe-reset-all` to panic

## v0.34.17

### BREAKING CHANGES

- CLI/RPC/Config
  - [cli] [\#8081](https://github.com/tendermint/tendermint/issues/8081) make the reset command safe to use (@marbar3778).

### BUG FIXES

- [consensus] [\#8079](https://github.com/tendermint/tendermint/issues/8079) start the timeout ticker before relay (backport #7844) (@creachadair).
- [consensus] [\#7992](https://github.com/tendermint/tendermint/issues/7992) [\#7994](https://github.com/tendermint/tendermint/issues/7994) change lock handling in handleMsg and reactor to alleviate issues gossiping during long ABCI calls (@williambanfield).
## v0.34.16

Special thanks to external contributors on this release: @yihuang

### BUG FIXES

- [consensus] [\#7617](https://github.com/tendermint/tendermint/issues/7617) calculate prevote message delay metric (backport #7551) (@williambanfield).
- [consensus] [\#7631](https://github.com/tendermint/tendermint/issues/7631) check proposal non-nil in prevote message delay metric (backport #7625) (@williambanfield).
- [statesync] [\#7885](https://github.com/tendermint/tendermint/issues/7885) statesync: assert app version matches (backport #7856) (@cmwaters).
- [statesync] [\#7881](https://github.com/tendermint/tendermint/issues/7881) fix app hash in state rollback (backport #7837) (@cmwaters).
- [cli] [#7837](https://github.com/tendermint/tendermint/pull/7837) fix app hash in state rollback. (@yihuang).
## v0.34.15

Special thanks to external contributors on this release: @thanethomson

### BUG FIXES

- [\#7368](https://github.com/tendermint/tendermint/issues/7368) cmd: add integration test for rollback functionality (@cmwaters).
- [\#7309](https://github.com/tendermint/tendermint/issues/7309) pubsub: Report a non-nil error when shutting down (fixes #7306).
- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).

### IMPROVEMENTS

- [config] [\#7230](https://github.com/tendermint/tendermint/issues/7230) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).
## v0.34.14

This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.

### FEATURES

- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in support for running the end-to-end test application (with state sync support) (@cmwaters).
- [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to roll back to the previous tendermint state, as sketched below. This may be useful in the event of a non-deterministic app hash or when reverting an upgrade. (@cmwaters)
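As a sketch of the new command's intended use (invocation only; no flags beyond Tendermint's standard `--home` are assumed here):

```sh
# Roll back one height of Tendermint state (e.g. after a non-deterministic
# app hash), then restart the node so it replays the last block.
tendermint rollback --home "$HOME/.tendermint"
```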
### IMPROVEMENTS

- [\#7103](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters)

### BUG FIXES

- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish).
## v0.34.13

This release backports improvements to state synchronization and ABCI
performance under concurrent load, and the PostgreSQL event indexer.

### IMPROVEMENTS

- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters)
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish)
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair)
## v0.34.12

Special thanks to external contributors on this release: @JayT106.

### FEATURES

- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
  `/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
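A minimal sketch of fetching the first chunk, assuming a local node on the default RPC port (the `chunk` parameter name and the base64 encoding of the returned data reflect the documented RPC behavior, stated here as an assumption rather than taken from the notes above):

```sh
# Fetch chunk 0 of the genesis file; later chunks are requested by
# incrementing `chunk` until the reported total is reached.
curl -s "http://localhost:26657/genesis_chunked?chunk=0"
```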
### IMPROVEMENTS

- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)

### BUG FIXES

- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
  with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
## v0.34.11

*June 18, 2021*

This release improves the robustness of state sync, tweaking channel priorities and timeouts and
adding two new parameters to the state sync config.

### BREAKING CHANGES

- Apps
  - [Version] [\#6494](https://github.com/tendermint/tendermint/issues/6494) `TMCoreSemVer` is no longer required to be set as an ldflag.

### IMPROVEMENTS

- [statesync] [\#6566](https://github.com/tendermint/tendermint/issues/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] [\#6378](https://github.com/tendermint/tendermint/issues/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] [\#6582](https://github.com/tendermint/tendermint/issues/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)

### BUG FIXES

- [evidence] [\#6375](https://github.com/tendermint/tendermint/issues/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
## v0.34.10

*April 14, 2021*

This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!

- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle "too high" errors to improve client robustness (@cmwaters)
## v0.34.9

*April 8, 2021*

This release fixes a moderate severity security issue, Security Advisory Alderfly,
which impacts all networks that rely on Tendermint light clients.
Further details will be released once networks have upgraded.

This release also includes a small Go API-breaking change to reduce panics in the RPC layer.

Special thanks to our external contributors on this release: @gchaincl

### BREAKING CHANGES

- Go API
  - [rpc/jsonrpc/server] [\#6204](https://github.com/tendermint/tendermint/issues/6204) Modify `WriteRPCResponseHTTP(Error)` to return an error (@melekes)

### FEATURES

- [rpc] [\#6226](https://github.com/tendermint/tendermint/issues/6226) Index block events and expose a new RPC method, `/block_search`, to allow querying for blocks by `BeginBlock` and `EndBlock` events (@alexanderbez)
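A minimal sketch of querying the new endpoint, assuming a local node on the default RPC port; the query value is illustrative, and the quoted-query syntax is assumed to follow the existing event query language used by `/tx_search`:

```sh
# Query blocks by their BeginBlock/EndBlock events. Using -G with
# --data-urlencode makes curl send the query as an encoded GET parameter.
curl -sG "http://localhost:26657/block_search" \
  --data-urlencode 'query="block.height > 10"'
```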
### BUG FIXES

- [rpc/jsonrpc/server] [\#6191](https://github.com/tendermint/tendermint/issues/6191) Correctly unmarshal `RPCRequest` when data is `null` (@melekes)
- [p2p] [\#6289](https://github.com/tendermint/tendermint/issues/6289) Fix "unknown channels" bug on CustomReactors (@gchaincl)
- [light/evidence] Adds logic to handle forward lunatic attacks (@cmwaters)
## v0.34.8

*February 25, 2021*

This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
introduces changes that should mean the logs are much, much quieter. 🎉

### IMPROVEMENTS

- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)

### BUG FIXES

- [abci] [\#6124](https://github.com/tendermint/tendermint/issues/6124) Fixes a panic condition during callback execution in `ReCheckTx` during high tx load. (@alexanderbez)
## v0.34.7

*February 18, 2021*

This release fixes a downstream security issue which impacts Cosmos SDK
users who are:

* Using Cosmos SDK v0.40.0 or later, AND
* Running validator nodes, AND
* Using the file-based `FilePV` implementation for their consensus keys

Users who fulfill all the above criteria were susceptible to leaking
private key material in the logs. All other users are unaffected.

The root cause was a discrepancy
between the Tendermint Core (untyped) logger and the Cosmos SDK (typed) logger:
Tendermint Core's logger automatically stringifies Go interfaces whenever possible;
however, the Cosmos SDK's logger uses reflection to log the fields within a Go interface.

The introduction of the typed logger meant that previously un-logged fields within
interfaces are now sometimes logged, including the private key material inside the
`FilePV` struct.

Tendermint Core v0.34.7 fixes this issue; however, we strongly recommend that all validators
use remote signer implementations instead of `FilePV` in production.

Thank you to @joe-bowman for his assistance with this vulnerability and a particular
shout-out to @marbar3778 for diagnosing it quickly.

### BUG FIXES

- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)
## v0.34.6

*February 18, 2021*

_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to build tooling problems._

## v0.34.4

*February 11, 2021*

This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!

Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!

### BUG FIXES

- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the RPC calls `/commit` and `/validators` (@cmwaters)
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
## v0.34.3

*January 19, 2021*

This release includes a fix for a high-severity security vulnerability,
a DoS vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.

Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.

### BUG FIXES

- [evidence] [[security fix]](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) Use correct source of evidence time (@cmwaters)
- [proto] [\#5886](https://github.com/tendermint/tendermint/pull/5889) Bump gogoproto to 1.3.2 (@marbar3778)
## v0.34.2

*January 12, 2021*

This release fixes a substantial bug in evidence handling where evidence could
sometimes be broadcast before the block containing that evidence was fully committed,
resulting in some nodes panicking when trying to verify said evidence.

### BREAKING CHANGES

- Go API
  - [libs/os] [\#5871](https://github.com/tendermint/tendermint/issues/5871) `EnsureDir` now propagates IO errors and checks the file type (@erikgrinaker)

### BUG FIXES

- [evidence] [\#5890](https://github.com/tendermint/tendermint/pull/5890) Add a buffer to evidence from consensus to avoid broadcasting and
  proposing evidence before the block at that evidence's height has been fully committed (@cmwaters)
- [statesync] [\#5889](https://github.com/tendermint/tendermint/issues/5889) Set `LastHeightConsensusParamsChanged` when bootstrapping Tendermint state (@cmwaters)
## v0.34.1

*January 6, 2021*

Special thanks to external contributors on this release:

@p4u from vocdoni.io reported that the mempool might behave incorrectly under a
high load. The consequences can range from pauses between blocks to peers
disconnecting from this node. As a temporary remedy (until the mempool package
is refactored), `max-batch-bytes` was disabled. Transactions will be sent
one by one without batching.

### BREAKING CHANGES

- CLI/RPC/Config
  - [cli] [\#5786](https://github.com/tendermint/tendermint/issues/5786) deprecate snake_case commands for hyphen-case (@cmwaters)

- Go API
  - [libs/protoio] [\#5868](https://github.com/tendermint/tendermint/issues/5868) Return number of bytes read in `Reader.ReadMsg()` (@erikgrinaker)

### IMPROVEMENTS

- [mempool] [\#5813](https://github.com/tendermint/tendermint/issues/5813) Add `keep-invalid-txs-in-cache` config option. When set to true, the mempool will keep invalid transactions in the cache (@p4u)

### BUG FIXES

- [crypto] [\#5707](https://github.com/tendermint/tendermint/issues/5707) Fix infinite recursion in string formatting of Secp256k1 keys (@erikgrinaker)
- [mempool] [\#5800](https://github.com/tendermint/tendermint/issues/5800) Disable `max-batch-bytes` (@melekes)
- [p2p] [\#5868](https://github.com/tendermint/tendermint/issues/5868) Fix inbound traffic statistics and rate limiting in `MConnection` (@erikgrinaker)
## v0.34.0

*November 19, 2020*

@@ -10,8 +453,6 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
@joe-bowman, @cuonglm, @SadPencil and @dongsam.

And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES

- CLI/RPC/Config

@@ -38,14 +479,14 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool to Protobuf encoding (@marbar3778)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
  - `MaxBatchBytes` new config setting defines the max size of one batch.
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)

- Blockchain Protocol

  - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
  - [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
  - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
  - [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
@@ -104,7 +545,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from an `int` to a `uint32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)

@@ -117,7 +558,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters)
- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker)
- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker)
- [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes)
- [evidence] [#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters)
@@ -131,7 +572,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes)
- [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters)
- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)

### IMPROVEMENTS

@@ -142,7 +583,7 @@ And as always, friendly reminder, that we have a [bug bounty program](https://ha
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
@@ -211,7 +652,7 @@ This security release fixes:
Tendermint 0.33.0 and above allow block proposers to include signatures for the
wrong block. This may happen naturally if you start a network, have it run for
some time and restart it **without changing the chainID**. (It is a
[misconfiguration](https://docs.tendermint.com/master/tendermint-core/using-tendermint.html)
[misconfiguration](https://docs.tendermint.com/v0.33/tendermint-core/using-tendermint.html)
to reuse chainIDs.) Correct block proposers will accidentally include signatures
for the wrong block if they see these signatures, and then commits won't validate,
making all proposed blocks invalid. A malicious validator (even with a minimal

@@ -252,9 +693,6 @@ as 2/3+ of the signatures are checked._

Special thanks to @njmurarka at Bluzelle Networks for reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)
@@ -270,8 +708,6 @@ need to update your code.**

Special thanks to external contributors on this release: @tau3

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -331,8 +767,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

Special thanks to external contributors on this release: @whylee259, @greg-szabo

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -419,9 +853,6 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)

@@ -434,8 +865,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release:
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -486,9 +915,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

Special thanks to external contributors on this release:
@princesinha19

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing to change default ordering from asc to desc (@princesinha19)

@@ -507,9 +933,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

*January 14, 2020*
This release contains breaking changes to the `Block#Header`, specifically
@@ -528,7 +951,7 @@ and a validator address plus a timestamp. Note we may remove the validator
address & timestamp fields in the future (see ADR-25).

The `lite2` package has been added to solve `lite` issues and introduce a weak
subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md) for complete details.
subjectivity interface. Refer to the [spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/light-client.md) for complete details.
The `lite` package is now deprecated and will be removed in the v0.34 release.

### BREAKING CHANGES:

@@ -738,9 +1161,6 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -752,9 +1172,6 @@ _January, 9, 2020_

Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)

@@ -776,9 +1193,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

@@ -818,9 +1232,6 @@ identified and fixed here.

Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -847,9 +1258,6 @@ accepting new peers and only allowing `ed25519` pubkeys.

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting

@@ -865,9 +1273,6 @@ All clients are recommended to upgrade. See

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key sent to a peer
@@ -878,9 +1283,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- CLI/RPC/Config

@@ -910,15 +1312,12 @@ program](https://hackerone.com/tendermint).

*August 28, 2019*

@climber73 wrote the [Writing a Tendermint Core application in Java
(gRPC)](https://github.com/tendermint/tendermint/blob/master/docs/guides/java.md)
(gRPC)](https://github.com/tendermint/tendermint/blob/v0.32.x/docs/guides/java.md)
guide.

Special thanks to external contributors on this release:
@gchaincl, @bluele, @climber73

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)

@@ -939,9 +1338,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release:
@ruseinov, @bluele, @guagualvcha

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -949,7 +1345,7 @@ program](https://hackerone.com/tendermint).

### FEATURES:

- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/master/config/toml.go#L303) NOTE: It's not ready for production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-043-blockchain-riri-org.md)
- [blockchain] [\#3561](https://github.com/tendermint/tendermint/issues/3561) Add early version of the new blockchain reactor, which is supposed to be more modular and testable compared to the old version. To try it, you'll have to change `version` in the config file, [here](https://github.com/tendermint/tendermint/blob/v0.34.x/config/toml.go#L303) NOTE: It's not ready for production yet. For further information, see [ADR-40](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-040-blockchain-reactor-refactor.md) & [ADR-43](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-043-blockchain-riri-org.md)
- [mempool] [\#3826](https://github.com/tendermint/tendermint/issues/3826) Make `max_msg_bytes` configurable (@bluele)
- [node] [\#3846](https://github.com/tendermint/tendermint/pull/3846) Allow replacing existing p2p.Reactor(s) using the [`CustomReactors`
  option](https://godoc.org/github.com/tendermint/tendermint/node#CustomReactors).
@@ -981,9 +1377,6 @@ This release contains a minor enhancement to the ABCI and some breaking changes

- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
- Removed various functions from `libs` pkgs

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1029,9 +1422,6 @@ and the RPC, namely:
  [docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config
@@ -1132,9 +1522,6 @@ Notes:

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)

@@ -1154,9 +1541,6 @@ identified and fixed here.

Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

- Go API

@@ -1183,9 +1567,6 @@ accepting new peers and only allowing `ed25519` pubkeys.

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting

@@ -1201,9 +1582,6 @@ All clients are recommended to upgrade. See

Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### SECURITY:

- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key sent to a peer
@@ -1284,7 +1662,7 @@ Special thanks to external contributors on this release:

- [libs/db] [\#3611](https://github.com/tendermint/tendermint/issues/3611) Conditional compilation
  * Use `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB or
    use `make build_c` / `make install_c` (full instructions can be found at
    https://docs.tendermint.com/master/introduction/install.html#compile-with-cleveldb-support)
    <https://docs.tendermint.com/>)
  * Use `boltdb` tag to compile Tendermint with bolt db
- [node] [\#3362](https://github.com/tendermint/tendermint/issues/3362) Return an error if `persistent_peers` list is invalid (except
  when IP lookup fails)
@@ -1498,9 +1876,6 @@ See the [v0.31.0
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
more details.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config

@@ -1511,7 +1886,7 @@ program](https://hackerone.com/tendermint).
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique clientIDs with open subscriptions. Configurable via `rpc.max_subscription_clients`
- [rpc] [\#3269](https://github.com/tendermint/tendermint/issues/2826) Limit number of unique queries a given client can subscribe to at once. Configurable via `rpc.max_subscriptions_per_client`.
- [rpc] [\#3435](https://github.com/tendermint/tendermint/issues/3435) Default ReadTimeout and WriteTimeout changed to 10s. WriteTimeout can be increased by setting `rpc.timeout_broadcast_tx_commit` in the config.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.
- [rpc/client] [\#3269](https://github.com/tendermint/tendermint/issues/3269) Update `EventsClient` interface to reflect new pubsub/eventBus API [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md). This includes `Subscribe`, `Unsubscribe`, and `UnsubscribeAll` methods.

* Apps
  - [abci] [\#3403](https://github.com/tendermint/tendermint/issues/3403) Remove `time_iota_ms` from BlockParams. This is a
@@ -1564,7 +1939,7 @@ program](https://hackerone.com/tendermint).
- [blockchain] [\#3358](https://github.com/tendermint/tendermint/pull/3358) Fix timer leak in `BlockPool` (@guagualvcha)
- [cmd] [\#3408](https://github.com/tendermint/tendermint/issues/3408) Fix `testnet` command's panic when creating non-validator configs (using `--n` flag) (@srmo)
- [libs/db/remotedb/grpcdb] [\#3402](https://github.com/tendermint/tendermint/issues/3402) Close Iterator/ReverseIterator after use
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-033-pubsub.md)
- [libs/pubsub] [\#951](https://github.com/tendermint/tendermint/issues/951), [\#1880](https://github.com/tendermint/tendermint/issues/1880) Use non-blocking send when dispatching messages [ADR-33](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-033-pubsub.md)
- [lite] [\#3364](https://github.com/tendermint/tendermint/issues/3364) Fix `/validators` and `/abci_query` proxy endpoints
  (@guagualvcha)
- [p2p/conn] [\#3347](https://github.com/tendermint/tendermint/issues/3347) Reject all-zero shared secrets in the Diffie-Hellman step of secret-connection
@@ -1631,7 +2006,7 @@ For more, see issues marked

This release also includes a fix to prevent Tendermint from including the same
piece of evidence in more than one block. This issue was reported by @chengwenxi in our
[bug bounty program](https://hackerone.com/tendermint).
[bug bounty program](https://hackerone.com/cosmos).

### BREAKING CHANGES:
@@ -1720,9 +2095,6 @@ This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once
- [rpc] [\#3159](https://github.com/tendermint/tendermint/issues/3159) Expose

@@ -1761,9 +2133,6 @@ While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* CLI/RPC/Config

@@ -1811,9 +2180,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release:
@HaoyangLiu

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BUG FIXES:

- [consensus] Fix consensus halt from proposing blocks with too much evidence
@@ -1941,9 +2307,6 @@ Special thanks to @dlguddus for discovering a [major
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
in the proposer selection algorithm.

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

This release is primarily about fixes to the proposer selection algorithm
in preparation for the [Cosmos Game of
Stakes](https://blog.cosmos.network/the-game-of-stakes-is-open-for-registration-83a404746ee6).

@@ -2005,9 +2368,6 @@ Special thanks to external contributors on this release:
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
@nagarajmanjunath, @tomtau

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#2747](https://github.com/tendermint/tendermint/issues/2747) Enable subscription to tags emitted from `BeginBlock`/`EndBlock` (@kostko)
@@ -2046,9 +2406,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release:
@danil-lashin, @kevlubkcm, @krhubert, @srmo

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).

### BREAKING CHANGES:

* Go API

@@ -2092,8 +2449,6 @@ program](https://hackerone.com/tendermint).

Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### FEATURES:

- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)

@@ -2111,8 +2466,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

Special thanks to external contributors on this release: @katakonst

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

### IMPROVEMENTS:

- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic

@@ -2140,7 +2493,7 @@ Special thanks to external contributors on this release:
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995, @yutianwu.

Special thanks to @Slamper for a series of bug reports in our [bug bounty
program](https://hackerone.com/tendermint) which are fixed in this release.
program](https://hackerone.com/cosmos) which are fixed in this release.

This release is primarily about adding Version fields to various data structures,
optimizing consensus messages for signing and verification in
@@ -2284,9 +2637,7 @@ Special thanks to external contributors on this release:

This release is mostly about the ConsensusParams - removing fields and enforcing MaxGas.
It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

[ADR-012](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-012-peer-transport.md).

BREAKING CHANGES:

@@ -2367,7 +2718,7 @@ BREAKING CHANGES:
- [abci] Added address of the original proposer of the block to Header
- [abci] Change ABCI Header to match Tendermint exactly
- [abci] [\#2159](https://github.com/tendermint/tendermint/issues/2159) Update use of `Validator` (see
  [ADR-018](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-018-ABCI-Validators.md)):
  [ADR-018](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-018-ABCI-Validators.md)):
  - Remove PubKey from `Validator` (so it's just Address and Power)
  - Introduce `ValidatorUpdate` (with just PubKey and Power)
  - InitChain and EndBlock use ValidatorUpdate

@@ -2389,7 +2740,7 @@ BREAKING CHANGES:
- [state] [\#1815](https://github.com/tendermint/tendermint/issues/1815) Validator set changes are now delayed by one block (!)
  - Add NextValidatorSet to State, changes on-disk representation of state
- [state] [\#2184](https://github.com/tendermint/tendermint/issues/2184) Enforce ConsensusParams.BlockSize.MaxBytes (See
  [ADR-020](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-020-block-size.md)).
  [ADR-020](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-020-block-size.md)).
  - Remove ConsensusParams.BlockSize.MaxTxs
  - Introduce maximum sizes for all components of a block, including ChainID
- [types] Updates to the block Header:

@@ -2400,7 +2751,7 @@ BREAKING CHANGES:
- [consensus] [\#2203](https://github.com/tendermint/tendermint/issues/2203) Implement BFT time
  - Timestamp in block must be monotonic and equal the median of timestamps in block's LastCommit
- [crypto] [\#2239](https://github.com/tendermint/tendermint/issues/2239) Secp256k1 signature changes (See
  [ADR-014](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-014-secp-malleability.md)):
  [ADR-014](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-014-secp-malleability.md)):
  - format changed from DER to `r || s`, both little endian encoded as 32 bytes.
  - malleability removed by requiring `s` to be in canonical form.

@@ -3200,7 +3551,7 @@ Also includes the Grand Repo-Merge of 2017.

BREAKING CHANGES:

- Config and Flags:
  - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11),
  - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/v0.10.0/config/config.go#L11),
    containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig`
  - This affects the following flags:
    - `--seeds` is now `--p2p.seeds`
CHANGELOG_PENDING.md

@@ -1,30 +1,16 @@
# Unreleased Changes

## vX.X

Special thanks to external contributors on this release:

Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

## v0.34.25

### BREAKING CHANGES

- CLI/RPC/Config
  - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
  - [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
  - [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)

- Apps
  - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
  - [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.

- P2P Protocol

- Go API
  - [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
  - [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
  - [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
  - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)

- Blockchain Protocol
@@ -32,19 +18,5 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi

### IMPROVEMENTS

- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)

### BUG FIXES

- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [crypto] \#5707 Fix infinite recursion in string formatting of Secp256k1 keys (@erikgrinaker)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
215 CONTRIBUTING.md
@@ -26,7 +26,7 @@ will indicate their support with a heartfelt emoji.

If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
Comment](https://github.com/tendermint/tendermint/tree/main/rfc). Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.
@@ -106,40 +106,22 @@ specify exactly the dependency you want to update, eg.

We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.

For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
For linting and checking breaking changes, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.

There are two ways to generate your proto stubs.
We use [Docker](https://www.docker.com/) to generate the protobuf stubs. To generate the stubs yourself, make sure docker is running then run `make proto-gen`.

1. Use Docker, pull an image that will generate your proto stubs with no need to install anything. `make proto-gen-docker`
2. Run `make proto-gen` after installing `buf` and `gogoproto`; you can do this by running `make protobuf`.
## Vagrant

### Installation Instructions
If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
hacking Tendermint with the commands below.

To install `protoc`, download an appropriate release (<https://github.com/protocolbuffers/protobuf>) and then move the provided binaries into your PATH (follow the instructions in the README included with the download).

To install `gogoproto`, do the following:
NOTE: In case you installed Vagrant in 2017, you might need to run
`vagrant box update` to upgrade to the latest `ubuntu/xenial64`.

```sh
go get github.com/gogo/protobuf/gogoproto
cd $GOPATH/pkg/mod/github.com/gogo/protobuf@v1.3.1 # or wherever go get installs things
make install
```

You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files.

### Visual Studio Code

If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:

```json
{
  "protoc": {
    "options": [
      "--proto_path=${workspaceRoot}/proto",
      "--proto_path=${workspaceRoot}/third_party/proto"
    ]
  }
}
```

```sh
vagrant up
vagrant ssh
make test
```

## Changelog
@@ -247,36 +229,6 @@ Each PR should have one commit once it lands on `master`; this can be accomplish

#### Major Release

This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, and you'd like to cut a major release directly from master, see below.

1. Start on the latest RC branch (`RCx/vX.X.0`).
2. Run integration tests.
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
   - "Squash" changes from the changelog entries for the RCs into a single entry,
     and add all changes included in `CHANGELOG_PENDING.md`.
     (Squashing includes both combining all entries, as well as removing or simplifying
     any intra-RC changes. It may also help to alphabetize the entries by package name.)
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all PRs
   - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
     or other upgrading flows.
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
   - `git checkout RCx/vX.X.0`
   - `git checkout -b release/vX.X.0`
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
   - `git tag -a vX.X.0 -m 'Release vX.X.0'`
   - `git push origin vX.X.0`
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
   new major release series.

##### Major Release (from `master`)

1. Start on `master`
2. Run integration tests (see `test_integrations` in Makefile)
3. Prepare release in a pull request against `master` (to be squash merged):
@@ -284,10 +236,11 @@ If there were no release candidates, and you'd like to cut a major release direc
   had release candidates, squash all the RC updates into one
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest
   - run `bash ./scripts/authors.sh` to get a list of authors since the latest
     release, and add the github aliases of external contributors to the top of
     the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump TMVersionDefault version in `version.go`
   - Bump P2P and block protocol versions in `version.go`, if necessary
   - Bump ABCI protocol version in `version.go`, if necessary
   - Make sure all significant breaking changes are covered in `UPGRADING.md`
@@ -295,39 +248,45 @@ If there were no release candidates, and you'd like to cut a major release direc
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
   - `git tag -a vX.X.x -m 'Release vX.X.x'`
   - `git push origin vX.X.x`
5. Update the `CHANGELOG.md` file on master with the releases changelog.
5. Update the changelog.md file on master with the releases changelog.
6. Delete any RC branches and tags for this release (if applicable)

#### Minor Release (Point Releases)
#### Minor Release

Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
Minor releases are done differently from major releases: They are built off of long-lived release candidate branches, rather than from master.

As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.

Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.

To create a minor release:

1. Checkout the long-lived backport branch: `git checkout vX.X.x`
1. Checkout the long-lived release candidate branch: `git checkout rcX/vX.X.X`
2. Run integration tests: `make test_integrations`
3. Check out a new branch and prepare the release:
   - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   - Reset the `CHANGELOG_PENDING.md`
   - Bump the ABCI version number, if necessary.
     (Note that ABCI follows semver, and that ABCI versions are the only versions
     which can change during minor releases, and only field additions are valid minor changes.)
3. Prepare the release:
   - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
   - run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
   - reset the `CHANGELOG_PENDING.md`
   - bump P2P and block protocol versions in `version.go`, if necessary
   - bump ABCI protocol version in `version.go`, if necessary
   - make sure all significant breaking changes are covered in `UPGRADING.md`
   - Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes that will land them back on `vX.X.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
4. Create a release branch `release/vX.X.x` off the release candidate branch:
   - `git checkout -b release/vX.X.x`
   - `git push -u origin release/vX.X.x`
   - Note that all branches prefixed with `release` are protected once pushed. You will need admin help to make any changes to the branch.
5. Once the release branch has been approved, make sure to pull it locally, then push a tag.
   - `git tag -a vX.X.x -m 'Release vX.X.x'`
   - `git push origin vX.X.x`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
   - Remove all `R:minor` labels from the pull requests that were included in the release.
   - Do not merge the backport branch into master.
   - Do not merge the release branch into master.
7. Delete the former long-lived release candidate branch once the release has been made.
8. Create a new release candidate branch to be used for the next release.

#### Backport Release

1. Start from the existing release branch you want to backport changes to (e.g. v0.30).
   Branch to a `release/vX.X.X` branch locally (e.g. `release/v0.30.7`).
2. Cherry-pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
3. Follow steps 2 and 3 from [Major Release](#major-release)
4. Push changes to the `release/vX.X.X` branch
5. Open a PR against the existing vX.X branch

#### Release Candidates

@@ -351,12 +310,86 @@ have distinct names from the tags/release names.)

## Testing

All repos should be hooked up to [CircleCI](https://circleci.com/).
### Unit tests

If they have `.go` files in the root directory, they will be automatically
tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.
Unit tests are located in `_test.go` files as directed by [the Go testing
package](https://golang.org/pkg/testing/). If you're adding or removing a
function, please check there's a `TestType_Method` test for it (see the sketch below).

Run: `make test`
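
As a sketch of that convention, consider the following self-contained test; the `validator` type and `hash` method are stand-ins invented for illustration, not real Tendermint APIs:

```go
package types_test

import (
	"crypto/sha256"
	"testing"
)

// validator and hash are illustrative stand-ins, not the real
// tendermint types.Validator API.
type validator struct {
	Address     []byte
	VotingPower int64
}

func (v validator) hash() []byte {
	h := sha256.Sum256(v.Address)
	return h[:]
}

// TestValidator_Hash follows the TestType_Method convention:
// one focused test per (type, method) pair.
func TestValidator_Hash(t *testing.T) {
	v := validator{Address: []byte("addr"), VotingPower: 10}
	if got, want := len(v.hash()), sha256.Size; got != want {
		t.Fatalf("hash length = %d, want %d", got, want)
	}
	// Hashing must be deterministic for consensus-critical types.
	if string(v.hash()) != string(v.hash()) {
		t.Error("hash is not deterministic")
	}
}
```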
### Integration tests

Integration tests are also located in `_test.go` files. What differentiates
them is a more complicated setup, which usually involves setting up two or more
components.

Run: `make test_integrations`

### End-to-end tests

End-to-end tests are used to verify a fully integrated Tendermint network.

See [README](./test/e2e/README.md) for details.

Run:

```sh
cd test/e2e && \
  make && \
  ./build/runner -f networks/ci.toml
```

### Maverick

**If you're changing the code in the `consensus` package, please make sure to
replicate all the changes in `./test/maverick/consensus`**. Maverick is a
byzantine node used to assert that the validator gets punished for malicious
behavior.

See [README](./test/maverick/README.md) for details.

### Model-based tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you most probably (99.9%) won't
need to touch these*.

For components that have been [formally
verified](https://en.wikipedia.org/wiki/Formal_verification) using
[TLA+](https://en.wikipedia.org/wiki/TLA%2B), it may be possible to generate
tests using a combination of the [Apalache Model
Checker](https://apalache.informal.systems/) and the [tendermint-rs testgen
util](https://github.com/informalsystems/tendermint-rs/tree/master/testgen).

Now, I know there's a lot to take in. If you want to learn more, check out
[this video](https://www.youtube.com/watch?v=aveoIMphzW8) by Andrey Kupriyanov
& Igor Konnov.

At the moment, we have model-based tests for the light client, located in the
`./light/mbt` directory.

Run: `cd light/mbt && go test`

### Fuzz tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you most probably (99.9%) won't
need to touch these*.

[Fuzz tests](https://en.wikipedia.org/wiki/Fuzzing) can be found inside the
`./test/fuzz` directory. See [README.md](./test/fuzz/README.md) for details.

Run: `cd test/fuzz && make fuzz-{PACKAGE-COMPONENT}`

### Jepsen tests (ADVANCED)

*NOTE: if you're just submitting your first PR, you most probably (99.9%) won't
need to touch these*.

[Jepsen](http://jepsen.io/) tests are used to verify the
[linearizability](https://jepsen.io/consistency/models/linearizable) property
of the Tendermint consensus. They are located in a separate repository
-> <https://github.com/tendermint/jepsen>. Please refer to its README for more
information.

### RPC Testing

@@ -369,4 +402,8 @@ make build-linux build-contract-tests-hooks
make contract-tests
```

**WARNING: these are currently broken due to <https://github.com/apiaryio/dredd>
not supporting complete OpenAPI 3**.

This command will pop up a network and check every endpoint against what has
been documented.

@@ -1,4 +1,14 @@
FROM alpine:3.9
# stage 1 Generate Tendermint Binary
FROM golang:1.18-alpine as builder
RUN apk update && \
    apk upgrade && \
    apk --no-cache add make
COPY / /tendermint
WORKDIR /tendermint
RUN make build-linux

# stage 2
FROM golang:1.18-alpine
LABEL maintainer="hello@tendermint.com"

# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
@@ -29,15 +39,14 @@ EXPOSE 26656 26657 26660

STOPSIGNAL SIGTERM

ARG BINARY=tendermint
COPY $BINARY /usr/bin/tendermint
COPY --from=builder /tendermint/build/tendermint /usr/bin/tendermint

# You can overwrite these before the first run to influence
# config.json and genesis.json. Additionally, you can override
# CMD to add parameters to `tendermint node`.
ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain

COPY ./docker-entrypoint.sh /usr/local/bin/
COPY ./DOCKER/docker-entrypoint.sh /usr/local/bin/

ENTRYPOINT ["docker-entrypoint.sh"]
CMD ["node"]

@@ -6,7 +6,7 @@ DockerHub tags for official releases are [here](https://hub.docker.com/r/tenderm

Official releases can be found [here](https://github.com/tendermint/tendermint/releases).

The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/master/DOCKER/Dockerfile).
The Dockerfile for tendermint is not expected to change in the near future. The master file used for all builds can be found [here](https://raw.githubusercontent.com/tendermint/tendermint/main/DOCKER/Dockerfile).

Respective versioned files can be found at <https://raw.githubusercontent.com/tendermint/tendermint/vX.XX.XX/DOCKER/Dockerfile> (replace the Xs with the version number).

@@ -20,9 +20,9 @@ Respective versioned files can be found <https://raw.githubusercontent.com/tende

Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.

For more background, see [the docs](https://docs.tendermint.com/master/introduction/#quick-start).
For more background, see [the docs](https://docs.tendermint.com/v0.34/introduction/#quick-start).

To get started developing applications, see the [application developers guide](https://docs.tendermint.com/master/introduction/quick-start.html).
To get started developing applications, see the [application developers guide](https://docs.tendermint.com/v0.34/introduction/quick-start.html).

## How to use this image

@@ -37,7 +37,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app

## Local cluster

To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/v0.34.x/Makefile) and run:

```sh
make build-linux
@@ -49,8 +49,8 @@ Note that this will build and use a different image than the ones provided here.

## License

- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/main/LICENSE).

## Contributing

Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information.
Contributions are most welcome! See the [contributing file](https://github.com/tendermint/tendermint/blob/main/CONTRIBUTING.md) for more information.

6 LICENSE
@@ -1,5 +1,3 @@
Tendermint Core
License: Apache2.0

                              Apache License
                        Version 2.0, January 2004
@@ -181,7 +179,7 @@ License: Apache2.0
   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
@@ -189,7 +187,7 @@ License: Apache2.0
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2016 All in Bits, Inc
   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.

111 Makefile
@@ -1,13 +1,11 @@
#!/usr/bin/make -f

PACKAGES=$(shell go list ./...)
BUILDDIR ?= $(CURDIR)/build
OUTPUT?=build/tendermint

BUILD_TAGS?=tendermint

# If building a release, please checkout the version tag to get the correct version setting
ifneq ($(shell git branch --show-current),)
VERSION := unreleased-$(shell git branch --show-current)-$(shell git rev-parse HEAD)
ifneq ($(shell git symbolic-ref -q --short HEAD),)
VERSION := unreleased-$(shell git symbolic-ref -q --short HEAD)-$(shell git rev-parse HEAD)
else
VERSION := $(shell git describe)
endif
@@ -15,7 +13,6 @@ endif
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
CGO_ENABLED ?= 0

# handle nostrip
@@ -58,61 +55,73 @@ LD_FLAGS += $(LDFLAGS)
all: check build test install
.PHONY: all

# The below include contains the tools.
include tools/Makefile
include test/Makefile
include tests.mk

###############################################################################
###                            Build Tendermint                            ###
###############################################################################

build: $(BUILDDIR)/
	CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(BUILDDIR)/ ./cmd/tendermint/
build:
	CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT) ./cmd/tendermint/
.PHONY: build

install:
	CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
.PHONY: install

$(BUILDDIR)/:
	mkdir -p $@

###############################################################################
###                                 Mocks                                  ###
###############################################################################

mockery:
	go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery

###############################################################################
###                               Protobuf                                 ###
###############################################################################

proto-all: proto-gen proto-lint proto-check-breaking
.PHONY: proto-all
check-proto-deps:
ifeq (,$(shell which protoc-gen-gogofaster))
	@go install github.com/gogo/protobuf/protoc-gen-gogofaster@latest
endif
.PHONY: check-proto-deps

proto-gen:
	## If you get the following error,
	## "error while loading shared libraries: libprotobuf.so.14: cannot open shared object file: No such file or directory"
	## See https://stackoverflow.com/a/25518702
	## Note the $< here is substituted for the %.proto
	## Note the $@ here is substituted for the %.pb.go
	@sh scripts/protocgen.sh
check-proto-format-deps:
ifeq (,$(shell which clang-format))
	$(error "clang-format is required for Protobuf formatting. See instructions for your platform on how to install it.")
endif
.PHONY: check-proto-format-deps

proto-gen: check-proto-deps
	@echo "Generating Protobuf files"
	@go run github.com/bufbuild/buf/cmd/buf generate
	@mv ./proto/tendermint/abci/types.pb.go ./abci/types/
.PHONY: proto-gen

proto-gen-docker:
	@echo "Generating Protobuf files"
	@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
.PHONY: proto-gen-docker

proto-lint:
	@$(DOCKER_BUF) check lint --error-format=json
# These targets are provided for convenience and are intended for local
# execution only.
proto-lint: check-proto-deps
	@echo "Linting Protobuf files"
	@go run github.com/bufbuild/buf/cmd/buf lint
.PHONY: proto-lint

proto-format:
proto-format: check-proto-format-deps
	@echo "Formatting Protobuf files"
	docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto find ./ -not -path "./third_party/*" -name *.proto -exec clang-format -i {} \;
	@find . -name '*.proto' -path "./proto/*" -exec clang-format -i {} \;
.PHONY: proto-format

proto-check-breaking:
	@$(DOCKER_BUF) check breaking --against-input .git#branch=master
proto-check-breaking: check-proto-deps
	@echo "Checking for breaking changes in Protobuf files against local branch"
	@echo "Note: This is only useful if your changes have not yet been committed."
	@echo "      Otherwise read up on buf's \"breaking\" command usage:"
	@echo "      https://docs.buf.build/breaking/usage"
	@go run github.com/bufbuild/buf/cmd/buf breaking --against ".git"
.PHONY: proto-check-breaking

proto-check-breaking-ci:
	@$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master
	@go run github.com/bufbuild/buf/cmd/buf breaking --against $(HTTPS_GIT)#branch=v0.34.x
.PHONY: proto-check-breaking-ci

###############################################################################
@@ -155,7 +164,7 @@ draw_deps:

get_deps_bin_size:
	@# Copy of build recipe with additional flags to perform binary size analysis
	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(BUILDDIR)/ ./cmd/tendermint/ 2>&1))
	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
	@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
	@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
.PHONY: get_deps_bin_size
@@ -191,7 +200,7 @@ format:

lint:
	@echo "--> Running linter"
	@golangci-lint run
	@go run github.com/golangci/golangci-lint/cmd/golangci-lint run
.PHONY: lint

DESTINATION = ./index.html.md
@@ -199,23 +208,31 @@ DESTINATION = ./index.html.md
###############################################################################
###                             Documentation                              ###
###############################################################################
# todo remove once tendermint.com DNS is solved

build-docs:
	@cd docs && \
	while read -r branch path_prefix; do \
		(git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
		(git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
		mkdir -p ~/output/$${path_prefix} ; \
		cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
		cp ~/output/$${path_prefix}/index.html ~/output ; \
	done < versions ;
.PHONY: build-docs

sync-docs:
	cd ~/output && \
	echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \
	echo "CI job = ${CIRCLE_BUILD_URL}" >> version.html ; \
	aws s3 sync . s3://${WEBSITE_BUCKET} --profile terraform --delete ; \
	aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ;
.PHONY: sync-docs

###############################################################################
###                             Docker image                               ###
###############################################################################

build-docker: build-linux
	cp $(BUILDDIR)/tendermint DOCKER/tendermint
	cp $(OUTPUT) DOCKER/tendermint
	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
	rm -rf DOCKER/tendermint
.PHONY: build-docker
@@ -225,7 +242,7 @@ build-docker: build-linux
###############################################################################

# Build linux binary on other platforms
build-linux: tools
build-linux:
	GOOS=linux GOARCH=amd64 $(MAKE) build
.PHONY: build-linux

@@ -269,17 +286,3 @@ endif
contract-tests:
	dredd
.PHONY: contract-tests

clean:
	rm -rf $(CURDIR)/artifacts/ $(BUILDDIR)/

build-reproducible:
	docker rm latest-build || true
	docker run --volume=$(CURDIR):/sources:ro \
		--env TARGET_PLATFORMS='linux/amd64 linux/arm64 darwin/amd64 windows/amd64' \
		--env APP=tendermint \
		--env COMMIT=$(shell git rev-parse --short=8 HEAD) \
		--env VERSION=$(shell git describe --tags) \
		--name latest-build cosmossdk/rbuilder:latest
	docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
.PHONY: build-reproducible

158 PHILOSOPHY.md (new file)
@@ -0,0 +1,158 @@

# Design goals

The design goals for Tendermint (and the SDK and related libraries) are:

* Simplicity and Legibility
* Parallel performance, namely the ability to utilize multicore architectures
* Ability to evolve the codebase bug-free
* Debuggability
* Complete correctness that considers all edge cases, especially in concurrency
* Future-proof modular architecture, message protocol, APIs, and encapsulation

## Justification

Legibility is key to maintaining bug-free software as it evolves toward more
optimizations, more ease of debugging, and additional features.

It is too easy to introduce bugs over time by replacing lines of code with
those that may panic, which means ideally locks are unlocked by defer
statements.

For example,

```go
func (obj *MyObj) something() {
	mtx.Lock()
	obj.something = other
	mtx.Unlock()
}
```

It is too easy to refactor the codebase in the future to replace `other` with
`other.String()` for example, and this may introduce a bug that causes a
deadlock. So as much as reasonably possible, we need to be using defer
statements, even though it introduces additional overhead.

If it is necessary to optimize the unlocking of mutex locks, the solution is
more modularity via smaller functions, so that defer'd unlocks are scoped
within a smaller function (see the sketch below).

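A defer-based version of the example above might look like the following minimal sketch; `MyObj` and its field are the same illustrative placeholders, and the mutex is additionally made a struct field, matching the struct-level-mutex style recommended under "On encapsulation" below:

```go
package example

import "sync"

type MyObj struct {
	mtx       sync.Mutex
	something string
}

// setSomething releases the lock via defer, so the mutex is unlocked even
// if a future refactor makes the assignment panic (e.g. other.String()).
func (obj *MyObj) setSomething(other string) {
	obj.mtx.Lock()
	defer obj.mtx.Unlock()
	obj.something = other
}
```
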
Similarly, idiomatic for-loops should always be preferred over those that use
custom counters, because it is too easy to evolve the body of a for-loop to
become more complicated over time, and it becomes more and more difficult to
assess the correctness of such a for-loop by visual inspection.

## On performance

It doesn't matter whether there are alternative implementations that are 2x or
3x more performant when the software doesn't work, deadlocks, or bugs cannot be
debugged. By taking advantage of multicore concurrency, the Tendermint
implementation will at least be within an order of magnitude of what is
theoretically possible. The design philosophy of Tendermint, and the choice of
Go as the implementation language, is designed to make the Tendermint
implementation the standard specification for concurrent BFT software.

By focusing on the message protocols (e.g. ABCI, p2p messages) and
encapsulation (e.g. the IAVL module, (relatively) independent reactors), we are
both implementing a standard implementation to be used as the specification for
future implementations in more optimizable languages like Rust, Java, and C++,
as well as creating sufficiently performant software. Tendermint Core will
never be as fast as future implementations of the Tendermint Spec, because Go
isn't designed to be as fast as possible. The advantage of using Go is that we
can develop the whole stack of modular components **faster** than in other
languages.

Furthermore, the real bottleneck is in the application layer, and it isn't
necessary to support more than a sufficiently decentralized set of validators
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).

Instead of optimizing Tendermint performance down to the metal, let's focus on
optimizing other matters, namely the ability to push feature-complete software
that works well enough, can be debugged and maintained, and can serve as a spec
for future implementations.

## On encapsulation

In order to create maintainable, forward-optimizable software, it is critical
to develop well-encapsulated objects that have well understood properties, and
to re-use these easy-to-use-correctly components as building blocks for further
encapsulated meta-objects.

For example, mutexes are cheap enough for Tendermint's design goals when there
isn't goroutine contention, so it is encouraged to create concurrency-safe
structures with struct-level mutexes. If they are used in the context of
non-concurrent logic, then the performance is good enough. If they are used in
the context of concurrent logic, then it will still perform correctly.

Examples of this design principle can be seen in the types.ValidatorSet struct
and the rand.Rand struct. It's one single struct declaration that can be used
in both concurrent and non-concurrent logic, and due to its good encapsulation,
it's easy to get the usage of the mutex right.

### example: rand.Rand

"The default Source is safe for concurrent use by multiple goroutines, but
Sources created by NewSource are not." The reason why the default
package-level source is safe for concurrent use is that it is protected (see
`lockedSource` in <https://golang.org/src/math/rand/rand.go>).

But we shouldn't rely on the global source; we should be creating our own
Rand/Source instances and using them, especially for determinism in testing.
So it is reasonable to have rand.Rand be protected by a mutex. Whether we want
our own implementation of Rand is another question, but the answer there is
also in the affirmative. Sometimes you want to know where Rand is being used
in your code, so it becomes a simple matter of dropping in a log statement to
inject inspectability into Rand usage. Also, it is nice to be able to extend
the functionality of Rand with custom methods. For these reasons, and for the
reasons outlined in this design philosophy document, we should continue to use
the rand.Rand object, with mutex protection (see the sketch below).

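A minimal sketch of such a mutex-protected wrapper is shown below. This is illustrative only; the actual implementation lives in Tendermint's `libs/rand` package and differs in detail:

```go
package rand

import (
	mrand "math/rand"
	"sync"
	"time"
)

// Rand wraps math/rand's Rand with a mutex so that one instance can be
// shared safely by both concurrent and non-concurrent callers.
type Rand struct {
	mtx sync.Mutex
	src *mrand.Rand
}

func New() *Rand {
	return &Rand{src: mrand.New(mrand.NewSource(time.Now().UnixNano()))}
}

// Int63 is safe for concurrent use, unlike a bare mrand.New(...) instance.
func (r *Rand) Int63() int64 {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.src.Int63()
}
```
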
Another key aspect of good encapsulation is the choice of exposed vs unexposed
methods. It should be clear to the reader of the code which methods are
intended to be used in what context, and what safe usage is. Part of this is
solved by hiding methods via unexported methods. Another part of this is
naming conventions on the methods (e.g. underscores) with good documentation,
and code organization. If there are too many exposed methods and it isn't
clear what methods have what side effects, then there is something wrong about
the design of abstractions that should be revisited.

## On concurrency

In order for Tendermint to remain relevant in the years to come, it is vital
for Tendermint to take advantage of multicore architectures. Due to the nature
of the problem, namely consensus across a concurrent p2p gossip network, and to
handle RPC requests for a large number of consuming subscribers, it is
unavoidable for Tendermint development to require expertise in concurrency
design, especially when it comes to the reactor design, and also for RPC
request handling.

# Guidelines

Here are some guidelines for designing for (sufficient) performance and concurrency:

* Mutex locks are cheap enough when there isn't contention.
* Do not optimize code without analytical or observed proof that it is in a hot path.
* Don't over-use channels when mutex locks w/ encapsulation are sufficient.
* The need to drain channels is often a hint of unconsidered edge cases.
* The creation of O(N) one-off goroutines is generally technical debt that
  needs to get addressed sooner rather than later. Avoid creating too many
  goroutines as a patch around incomplete concurrency design, or at least be
  aware of the debt and do not invest in the debt. On the other hand, Tendermint
  is designed to have a limited number of peers (e.g. 10 or 20), so the creation
  of O(C) goroutines per O(P) peers is still O(C\*P=constant); see the sketch
  after this list.
* Use defer statements to unlock as much as possible. If you want to unlock sooner,
  try to create more modular functions that do make use of defer statements.

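As a sketch of the bounded-goroutines guideline (all names here are invented for illustration; this is not the real p2p reactor code):

```go
package p2p

import "sync"

// peer is an illustrative stand-in, not the real p2p.Peer.
type peer struct{ id string }

func sendLoop(p peer) { /* write outbound messages for p */ }
func recvLoop(p peer) { /* read inbound messages from p */ }

// startPeerRoutines launches a constant number of goroutines per peer
// (here C = 2: a send loop and a receive loop). With a bounded peer set
// of size P, the total O(C*P) goroutine count stays effectively constant.
func startPeerRoutines(peers []peer, wg *sync.WaitGroup) {
	for _, p := range peers {
		p := p // capture the loop variable (pre-Go 1.22 idiom)
		wg.Add(2)
		go func() { defer wg.Done(); sendLoop(p) }()
		go func() { defer wg.Done(); recvLoop(p) }()
	}
}
```
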
# Mantras

* Premature optimization kills
* Readability is paramount
* Beautiful is better than fast.
* In the face of ambiguity, refuse the temptation to guess.
* In the face of bugs, refuse the temptation to cover the bug.
* There should be one-- and preferably only one --obvious way to do it.
204 README.md
@@ -1,153 +1,138 @@

# Tendermint

_UPDATE: The Tendermint Core feature set is frozen for LTS; see issue <https://github.com/tendermint/tendermint/issues/9972>_<br/>
_This is the latest stable release used by cosmoshub-4, version 0.34.24_<br/>
_The previous main branch (v0.38.xx) can now be found under "main_backup"_<br/>

[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
[Byzantine-Fault Tolerant][bft] [State Machine Replication][smr]. Or
[Blockchain], for short.

[![Version][version-badge]][version-url]
[![API Reference][api-badge]][api-url]
[![Go version][go-badge]][go-url]
[![Discord chat][discord-badge]][discord-url]
[![License][license-badge]][license-url]
[![Sourcegraph][sg-badge]][sg-url]

| Branch | Tests                              | Linting                         |
|--------|------------------------------------|---------------------------------|
| main   | [![Tests][tests-badge]][tests-url] | [![Lint][lint-badge]][lint-url] |

Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a
state transition machine - written in any programming language - and securely
replicates it on many machines.

For protocol details, see [the specification](https://github.com/tendermint/spec).
For protocol details, refer to the [Tendermint Specification](./spec/README.md).

For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".
For detailed analysis of the consensus protocol, including safety and liveness
proofs, read our paper, "[The latest gossip on BFT
consensus](https://arxiv.org/abs/1807.04938)".

## Documentation

Complete documentation can be found on the
[website](https://docs.tendermint.com/).

## Releases

Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
Please do not depend on `main` as your production branch. Use
[releases](https://github.com/tendermint/tendermint/releases) instead.

Tendermint is being used in production in both private and public environments,
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).
Tendermint has been used in production in private and public environments, most
notably the blockchains of the Cosmos Network. We haven't released v1.0 yet,
since we are still making breaking changes to the protocol and the APIs. See
below for more details about [versioning](#versioning).

In any case, if you intend to run Tendermint in production, we're happy to help. You can
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/AzefAFd).
In any case, if you intend to run Tendermint in production, we're happy to help.
You can contact us [over email](mailto:hello@newtendermint.org) or [join the
chat](https://discord.gg/gnoland).

More on how releases are conducted can be found [here](./RELEASES.md).

## Security

To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/tendermint).
To report a security vulnerability, please [email us](mailto:security@newtendermint.org).
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).

## Minimum requirements

| Requirement | Notes            |
|-------------|------------------|
| Go version  | Go1.15 or higher |

## Documentation

Complete documentation can be found on the [website](https://docs.tendermint.com/master/).
| Requirement | Notes             |
|-------------|-------------------|
| Go version  | Go 1.18 or higher |

### Install

See the [install instructions](/docs/introduction/install.md).
See the [install instructions](./docs/introduction/install.md).

### Quick Start

- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
- [Remote cluster using Terraform and Ansible](/docs/networks/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)
- [Single node](./docs/introduction/quick-start.md)
- [Local cluster using docker-compose](./docs/tools/docker-compose.md)
- [Remote cluster using Terraform and Ansible](./docs/tools/terraform-and-ansible.md)

## Contributing

Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions.

Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md)
and the [style guide](STYLE_GUIDE.md). You may also find it helpful to read the
[specifications](https://github.com/tendermint/spec), watch the [Developer Sessions](/docs/DEV_SESSIONS.md),
and familiarize yourself with our
[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).
Before contributing to the project, please take a look at the [contributing
guidelines](CONTRIBUTING.md) and the [style guide](STYLE_GUIDE.md). You may also
find it helpful to read the [specifications](./spec/README.md), and familiarize
yourself with our [Architectural Decision Records
(ADRs)](./docs/architecture/README.md) and
[Request For Comments (RFCs)](./docs/rfc/README.md).

## Versioning

### Semantic Versioning

Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and
how the version changes. According to SemVer, anything in the public API can
change at any time before version 1.0.0.

To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the Go APIs.
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR
version is used to signal breaking changes across Tendermint's API. This API
includes all publicly exposed types, functions, and methods in non-internal Go
packages as well as the types and methods accessible via the Tendermint RPC
interface.

That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:

- crypto
- config
- libs
  - bits
  - bytes
  - json
  - log
  - math
  - net
  - os
  - protoio
  - rand
  - sync
  - strings
  - service
- node
- rpc/client
- types
Breaking changes to these public APIs will be documented in the CHANGELOG.

### Upgrades

In an effort to avoid accumulating technical debt prior to 1.0.0, we do not
guarantee that breaking changes (ie. bumps in the MINOR version) will work with
existing Tendermint blockchains. In these cases you will have to start a new
blockchain, or write something custom to get the old data into the new chain.
However, any bump in the PATCH version should be compatible with existing
blockchain histories.

For more information on upgrading, see [UPGRADING.md](./UPGRADING.md).

### Supported Versions

Because we are a small core team, we only ship patch updates, including security
updates, to the most recent minor release and the second-most recent minor
release. Consequently, we strongly recommend keeping Tendermint up-to-date.
Upgrading instructions can be found in [UPGRADING.md](./UPGRADING.md).

## Resources

### Tendermint Core
### Libraries

For details about the blockchain data structures and the p2p protocols, see the
[Tendermint specification](https://docs.tendermint.com/master/spec/).

For details on using the software, see the [documentation](/docs/) which is also
hosted at: <https://docs.tendermint.com/master/>

### Tools

Benchmarking is provided by [`tm-load-test`](https://github.com/informalsystems/tm-load-test).
Additional tooling can be found in [/docs/tools](/docs/tools).
- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a framework for building
  applications in Golang
- [Tendermint in Rust](https://github.com/informalsystems/tendermint-rs)
- [ABCI Tower](https://github.com/penumbra-zone/tower-abci)

### Applications

- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
- [Many more](https://tendermint.com/ecosystem)
- [Cosmos Hub](https://hub.cosmos.network/)
- [Terra](https://www.terra.money/)
- [Celestia](https://celestia.org/)
- [Anoma](https://anoma.network/)
- [Vocdoni](https://docs.vocdoni.io/)

### Research

@@ -156,3 +141,30 @@ Additional tooling can be found in [/docs/tools](/docs/tools).
- [Original Whitepaper: "Tendermint: Consensus Without Mining"](https://tendermint.com/static/docs/tendermint.pdf)
- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)

## Join us!

The development of Tendermint Core was led primarily by All in Bits, Inc. The
Tendermint trademark is owned by New Tendermint, LLC. If you'd like to work
full-time on Tendermint2 or [gno.land](https://gno.land), [we're
hiring](mailto:hiring@newtendermint.org)!

[bft]: https://en.wikipedia.org/wiki/Byzantine_fault_tolerance
[smr]: https://en.wikipedia.org/wiki/State_machine_replication
[Blockchain]: https://en.wikipedia.org/wiki/Blockchain
[version-badge]: https://img.shields.io/github/tag/tendermint/tendermint.svg
[version-url]: https://github.com/tendermint/tendermint/releases/latest
[api-badge]: https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667
[api-url]: https://pkg.go.dev/github.com/tendermint/tendermint
[go-badge]: https://img.shields.io/badge/go-1.18-blue.svg
[go-url]: https://github.com/moovweb/gvm
[discord-badge]: https://img.shields.io/discord/669268347736686612.svg
[discord-url]: https://discord.gg/cosmosnetwork
[license-badge]: https://img.shields.io/github/license/tendermint/tendermint.svg
[license-url]: https://github.com/tendermint/tendermint/blob/main/LICENSE
[sg-badge]: https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg
[sg-url]: https://sourcegraph.com/github.com/tendermint/tendermint?badge
[tests-url]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml
[tests-badge]: https://github.com/tendermint/tendermint/actions/workflows/tests.yml/badge.svg?branch=main
[lint-badge]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml/badge.svg
[lint-url]: https://github.com/tendermint/tendermint/actions/workflows/lint.yml

153 SECURITY.md
@@ -2,98 +2,146 @@
|
||||
|
||||
## Reporting a Bug
|
||||
|
||||
As part of our [Coordinated Vulnerability Disclosure
|
||||
Policy](https://tendermint.com/security), we operate a [bug
|
||||
bounty](https://hackerone.com/tendermint).
|
||||
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.
|
||||
As part of our [Coordinated Vulnerability Disclosure Policy](https://tendermint.com/security),
|
||||
we operate a [bug bounty][hackerone]. See the policy for more
|
||||
details on submissions and rewards, and see "Example Vulnerabilities" (below)
|
||||
for examples of the kinds of bugs we're most interested in.
|
||||
|
||||
### Guidelines
|
||||
### Guidelines
|
||||
|
||||
We require that all researchers:
|
||||
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
|
||||
* Use the bug bounty to disclose all vulnerabilities, and avoid posting
|
||||
vulnerability information in public places, including Github Issues, Discord
|
||||
channels, and Telegram groups
|
||||
* Make every effort to avoid privacy violations, degradation of user experience,
|
||||
disruption to production systems (including but not limited to the Cosmos
|
||||
Hub), and destruction of data
|
||||
* Keep any information about vulnerabilities that you’ve discovered confidential
|
||||
between yourself and the Tendermint Core engineering team until the issue has
|
||||
been resolved and disclosed
|
||||
* Avoid posting personally identifiable information, privately or publicly
|
||||
|
||||
If you follow these guidelines when reporting an issue to us, we commit to:
|
||||
|
||||
* Not pursue or support any legal action related to your research on this vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
|
||||
* Not pursue or support any legal action related to your research on this
|
||||
vulnerability
|
||||
* Work with you to understand, resolve and ultimately disclose the issue in a
|
||||
timely fashion
|
||||
|
||||
## Disclosure Process

Tendermint Core uses the following disclosure process:

1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability’s potential impact on the Cosmos Hub.
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List][tmsec-mailing].
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on GitHub and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.

This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible; however, it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
### Example Timeline

The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.

#### 24+ Hours Before Release Time

1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Create patches in a private security repo, and ensure that PRs are open targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
5. Write “Security Advisory” for forum (TENDERMINT LEAD)

#### 24 Hours Before Release Time

1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
2. Post Tweet linking to forum post (COMMS LEAD)
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
4. Send emails to validators or other users (PARTNERSHIPS LEAD)

#### Release Time

1. Cut Tendermint releases for eligible versions (TENDERMINT ENG, TENDERMINT LEAD)
2. Cut Cosmos SDK release for eligible versions (COSMOS ENG)
3. Cut Gaia release for eligible versions (GAIA ENG)
4. Post “Security releases” on forum (TENDERMINT LEAD)
5. Post new Tweet linking to forum post (COMMS LEAD)
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
7. Send emails to validators or other users (COMMS LEAD)
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)

#### After Release Time

1. Write forum post with exploit details (TENDERMINT LEAD)
2. Approve pay-out on HackerOne for submitter (ADMIN)

#### 7 Days After Release Time

1. Publish CVE if it has not yet been published (ADMIN)
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)
## Supported Releases

The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well as for the major/minor release that the Cosmos Hub is running.

If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
## Scope

The full scope of our bug bounty program is outlined on our [Hacker One program page][hackerone]. Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:

* Any third-party services
* Findings from physical testing, such as office access
* Findings derived from social engineering (e.g., phishing)
## Example Vulnerabilities

The following is a list of examples of the kinds of vulnerabilities that we’re most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!

### Specification

@@ -105,7 +153,8 @@ The following is a list of examples of the kinds of vulnerabilities that we’re

Assuming less than 1/3 of the voting power is Byzantine (malicious):

* Validation of blockchain data structures, including blocks, block parts, votes, and so on
* Execution of blocks
* Validator set changes
* Proposer round robin

@@ -114,6 +163,9 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):

* A node halting (liveness failure)
* Syncing new and old nodes

Assuming more than 1/3 of the voting power is Byzantine:

* Attacks that go unpunished (unhandled evidence)

### Networking

@@ -139,7 +191,7 @@ Attacks may come through the P2P network or the RPC layer:

### Libraries

* Serialization
* Reading/Writing files and databases

### Cryptography

@@ -150,5 +202,8 @@ Attacks may come through the P2P network or the RPC layer:

### Light Client

* Core verification
* Bisection/sequential algorithms

[hackerone]: https://hackerone.com/cosmos
[tmsec-mailing]: https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc
@@ -98,7 +98,7 @@ Sometimes it's necessary to rename libraries to avoid naming collisions or ambig

* Make use of table driven testing where possible and not-cumbersome (see the sketch below)
  * [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go)
* Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require)
* When using mocks, it is recommended to use Testify [mock](<https://pkg.go.dev/github.com/stretchr/testify/mock>) along with [Mockery](https://github.com/vektra/mockery) for autogeneration
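
To make the testing bullets concrete, here is a minimal, self-contained sketch of the table-driven pattern using `require`; the `Add` function is a hypothetical stand-in for real code under test:

```go
package example

import (
	"testing"

	"github.com/stretchr/testify/require"
)

// Add is a hypothetical function under test.
func Add(a, b int) int { return a + b }

func TestAdd(t *testing.T) {
	// Each case is one row of the table; adding a case is one line.
	testCases := []struct {
		name string
		a, b int
		want int
	}{
		{"zero", 0, 0, 0},
		{"positive", 2, 3, 5},
		{"negative", -2, -3, -5},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			require.Equal(t, tc.want, Add(tc.a, tc.b))
		})
	}
}
```

Each case runs as its own subtest, so failures report the case name rather than a line number.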
## Errors
UPGRADING.md
@@ -1,29 +1,30 @@

# Upgrading Tendermint Core

This guide provides instructions for upgrading to specific versions of Tendermint Core.

## v0.34.24

### ABCI Changes

Note that in [\#9724](https://github.com/tendermint/tendermint/pull/9724) we un-prettified the JSON output (i.e. removed all indentation) of the HTTP and WebSocket RPC for performance and subscription stability reasons. We recommend using a tool such as [jq](https://github.com/stedolan/jq) to obtain prettified output if you rely on that prettified output in some way.

* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid errors from unimplemented changes (a sketch of such a check appears after this list).
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in early ABCI implementations.
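
As a rough illustration of that version check (this sketch is not from the repository, and `wantABCIVersion` is a hypothetical placeholder), an application's `Info` handler might log a mismatch:

```go
package main

import (
	"log"

	"github.com/tendermint/tendermint/abci/types"
)

// wantABCIVersion is a hypothetical constant: the ABCI version this
// application was developed and tested against.
const wantABCIVersion = "0.17.0"

type App struct {
	types.BaseApplication
}

// Info warns when the node speaks a different ABCI version than the
// application expects.
func (app *App) Info(req types.RequestInfo) types.ResponseInfo {
	if req.AbciVersion != wantABCIVersion {
		log.Printf("warning: node ABCI version %q, application expects %q",
			req.AbciVersion, wantABCIVersion)
	}
	return types.ResponseInfo{Data: "example-app"}
}
```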
### Config Changes

* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.

### CLI Changes

* If you had previously used `tendermint gen_node_key` to generate a new node key, keep in mind that it no longer saves the output to a file. You can use `tendermint init` or pipe the output of `tendermint gen_node_key` to `$TMHOME/config/node_key.json`:

```
$ tendermint gen_node_key > $TMHOME/config/node_key.json
```

## v0.34.20

### Feature: Priority Mempool

This release backports an implementation of the Priority Mempool from the v0.35 branch. This implementation of the mempool permits the application to set a priority on each transaction during CheckTx, and during block selection the highest-priority transactions are chosen (subject to the constraints on size and gas cost); see the sketch below.

Operators can enable the priority mempool by setting `mempool.version` to `"v1"` in the `config.toml`. For more technical details about the priority mempool, see [ADR 067: Mempool Refactor](https://github.com/tendermint/tendermint/blob/main/docs/architecture/adr-067-mempool-refactor.md).
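
For illustration, a minimal sketch of priority assignment (assuming the v0.34.20 `abci/types` package; the fee-in-the-first-8-bytes encoding is a hypothetical convention, not a Tendermint rule):

```go
package main

import (
	"encoding/binary"

	"github.com/tendermint/tendermint/abci/types"
)

type App struct {
	types.BaseApplication
}

// CheckTx assigns a priority to each transaction. In this hypothetical
// scheme the first 8 bytes of the tx encode a fee, and higher fees get
// higher priority during block selection by the v1 mempool.
func (app *App) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
	if len(req.Tx) < 8 {
		return types.ResponseCheckTx{Code: 1, Log: "tx too short"}
	}
	fee := binary.BigEndian.Uint64(req.Tx[:8])
	return types.ResponseCheckTx{
		Code:     types.CodeTypeOK,
		Priority: int64(fee), // consulted by the priority mempool
	}
}
```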
## v0.34.0

@@ -31,7 +32,7 @@ This guide provides instructions for upgrading to specific versions of Tendermin

This release is not compatible with previous blockchains due to changes to the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").

Note also that Tendermint 0.34 requires Go 1.16 or higher.

### ABCI Changes

@@ -41,7 +42,7 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.

were added to support the new State Sync feature. Previously, syncing a new node to a preexisting network could take days; but with State Sync, new nodes are able to join a network in a matter of seconds. Read [the spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/apps.md#state-sync) if you want to learn more about State Sync, or if you'd like your application to use it. (If you don't want to support State Sync in your application, you can just implement these new ABCI methods as no-ops, leaving them empty.)
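
A minimal sketch of such no-op implementations, assuming the v0.34 `abci/types` package (the `App` type is a placeholder):

```go
package main

import (
	"github.com/tendermint/tendermint/abci/types"
)

// App opts out of State Sync: every snapshot-related ABCI method is a
// no-op that returns an empty response, so peers simply see no snapshots.
type App struct {
	types.BaseApplication
}

func (App) ListSnapshots(types.RequestListSnapshots) types.ResponseListSnapshots {
	return types.ResponseListSnapshots{} // no snapshots to offer
}

func (App) OfferSnapshot(types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
	return types.ResponseOfferSnapshot{} // never accept a snapshot
}

func (App) LoadSnapshotChunk(types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
	return types.ResponseLoadSnapshotChunk{}
}

func (App) ApplySnapshotChunk(types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
	return types.ResponseApplySnapshotChunk{}
}
```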
@@ -57,7 +58,7 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.

Applications should be able to handle these evidence types (i.e., through slashing or other accountability measures).

* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/v0.34.x/proto/tendermint/crypto/keys.proto#L13-L15) (used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type. Note that since Tendermint only supports ed25519 validator keys, there's only one option in the `oneof`. For more, see "Protocol Buffers," below.

@@ -65,8 +66,6 @@ Note also that Tendermint 0.34 also requires Go 1.15 or higher.

* The field `Proof`, on the ABCI type `ResponseQuery`, is now named `ProofOps`. For more, see "Crypto," below.
### P2P Protocol

The default codec is now proto3, not amino. The schema files can be found in the `/proto`

@@ -75,8 +74,8 @@ directory. For more, see "Protobuf," below.

### Blockchain Protocol

* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data)`, as of v0.34 also includes the `GasWanted` and `GasUsed` fields.

* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input, to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).

@@ -166,7 +165,7 @@ The `bech32` package has moved to the Cosmos SDK:

### CLI

The `tendermint lite` command has been renamed to `tendermint light` and has a slightly different API. See [the docs](https://docs.tendermint.com/v0.33/tendermint-core/light-client-protocol.html#http-proxy) for details.

### Light Client

@@ -180,7 +179,7 @@ Other user-relevant changes include:

* The `Verifier` was broken up into two pieces:
  * Core verification logic (pure `VerifyX` functions)
  * `Client` object, which represents the complete light client
* The new light client stores headers and validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.
@@ -322,7 +321,7 @@ Evidence Params has been changed to include duration.

### RPC Changes

* `/validators` is now paginated (default: 30 validators per page)
* `/block_results` response format updated ([see RPC docs for details](https://docs.tendermint.com/v0.33/rpc/#/Info/block_results))
* Event suffix has been removed from the ID in event responses
* IDs are now integers, not `json-client-XYZ`
@@ -441,11 +440,11 @@ the compilation tag:

Use the `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB, or use `make build_c` / `make install_c` (full instructions can be found at <https://docs.tendermint.com/v0.33/introduction/install.html#compile-with-cleveldb-support>).

## v0.31.0

This release contains a breaking change to the behavior of the pubsub system. It also contains some minor breaking changes in the Go API and ABCI. There are no changes to the block or p2p protocols, so v0.31.0 should work fine with blockchains created from the v0.30 series.
@@ -463,7 +462,7 @@ In this case, the WS client will receive an error with description:

        "error": {
            "code": -32000,
            "msg": "Server error",
            "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
        }
    }
@@ -516,14 +515,14 @@ due to changes in how various data structures are hashed.

Any implementations of Tendermint blockchain verification, including lite clients, will need to be updated. For specific details:

* [Merkle tree](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/encoding.md#merkle-trees)
* [ConsensusParams](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/blockchain/state.md#consensusparams)

There was also a small change to field ordering in the vote struct. Any implementations of an out-of-process validator (like a Key-Management Server) will need to be updated. For specific details:

* [Vote](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/consensus/signing.md#votes)

Finally, the proposer selection algorithm continues to evolve. See the [work-in-progress

@@ -644,7 +643,7 @@ to `timeout_propose = "3s"`.

### RPC Changes

The default behavior of `/abci_query` has been changed to not return a proof, and the name of the parameter that controls this has been changed from `trusted` to `prove`. To get proofs with your queries, ensure you set `prove=true`.
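
As an illustrative sketch, assuming the later (v0.34-era) Go RPC client API rather than the client that shipped with this release; the address, query path, and key below are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"log"

	rpcclient "github.com/tendermint/tendermint/rpc/client"
	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Placeholder node address.
	c, err := rpchttp.New("tcp://localhost:26657", "/websocket")
	if err != nil {
		log.Fatal(err)
	}

	// Request a Merkle proof explicitly; the default is Prove: false.
	res, err := c.ABCIQueryWithOptions(
		context.Background(),
		"/key",          // placeholder query path
		[]byte("mykey"), // placeholder query data
		rpcclient.ABCIQueryOptions{Prove: true},
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Response.ProofOps)
}
```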
Vagrantfile (vendored, new file)
@@ -0,0 +1,66 @@

# -*- mode: ruby -*-
# vi: set ft=ruby :

Vagrant.configure("2") do |config|
  config.vm.box = "ubuntu/focal64"

  config.vm.provider "virtualbox" do |v|
    v.memory = 4096
    v.cpus = 2
  end

  config.vm.provision "shell", inline: <<-SHELL
    apt-get update

    # install base requirements
    apt-get install -y --no-install-recommends wget curl jq zip \
      make shellcheck bsdmainutils psmisc
    apt-get install -y language-pack-en

    # install docker
    apt-get install -y --no-install-recommends apt-transport-https \
      ca-certificates \
      curl \
      gnupg-agent \
      software-properties-common
    curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
    add-apt-repository \
      "deb [arch=amd64] https://download.docker.com/linux/ubuntu \
      $(lsb_release -cs) \
      stable"
    apt-get update
    apt-get install -y docker-ce
    usermod -aG docker vagrant

    # install go
    wget -q https://dl.google.com/go/go1.15.linux-amd64.tar.gz
    tar -xvf go1.15.linux-amd64.tar.gz
    mv go /usr/local
    rm -f go1.15.linux-amd64.tar.gz

    # install nodejs (for docs)
    curl -sL https://deb.nodesource.com/setup_11.x | bash -
    apt-get install -y nodejs

    # cleanup
    apt-get autoremove -y

    # set env variables
    echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
    echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
    echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
    echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
    echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile

    mkdir -p /home/vagrant/go/bin
    mkdir -p /home/vagrant/go/src/github.com/tendermint
    ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint

    chown -R vagrant:vagrant /home/vagrant/go
    chown vagrant:vagrant /home/vagrant/.bash_profile

    # get all deps and tools, ready to install/test
    su - vagrant -c 'source /home/vagrant/.bash_profile'
    su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools'
  SHELL
end
@@ -19,7 +19,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g

A detailed description of the ABCI methods and message types is contained in:

- [The main spec](https://github.com/tendermint/tendermint/blob/v0.34.x/spec/abci/abci.md)
- [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)
@@ -1,7 +1,6 @@

package abcicli

import (
	"context"
	"fmt"
	"sync"
@@ -15,53 +14,48 @@

	echoRetryIntervalSeconds = 1
)

//go:generate mockery --case underscore --name Client

// Client defines an interface for an ABCI client.
//
// All `Async` methods return a `ReqRes` object and an error.
// All `Async` methods return a `ReqRes` object.
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
//
// NOTE these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes
// and logs.
// Note these are client errors, eg. ABCI socket connectivity issues.
// Application-related errors are reflected in response via ABCI error codes and logs.
type Client interface {
	service.Service

	SetResponseCallback(Callback)
	Error() error

	// Asynchronous requests
	FlushAsync(context.Context) (*ReqRes, error)
	EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
	InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
	DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
	CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
	QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
	CommitAsync(context.Context) (*ReqRes, error)
	InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
	BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
	EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
	ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
	OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
	LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
	ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
	FlushAsync() *ReqRes
	EchoAsync(msg string) *ReqRes
	InfoAsync(types.RequestInfo) *ReqRes
	SetOptionAsync(types.RequestSetOption) *ReqRes
	DeliverTxAsync(types.RequestDeliverTx) *ReqRes
	CheckTxAsync(types.RequestCheckTx) *ReqRes
	QueryAsync(types.RequestQuery) *ReqRes
	CommitAsync() *ReqRes
	InitChainAsync(types.RequestInitChain) *ReqRes
	BeginBlockAsync(types.RequestBeginBlock) *ReqRes
	EndBlockAsync(types.RequestEndBlock) *ReqRes
	ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes
	OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
	LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
	ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes

	// Synchronous requests
	FlushSync(context.Context) error
	EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
	InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
	DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
	CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
	QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
	CommitSync(context.Context) (*types.ResponseCommit, error)
	InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
	BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
	EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
	ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
	OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
	LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
	ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
	FlushSync() error
	EchoSync(msg string) (*types.ResponseEcho, error)
	InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
	SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
	DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
	CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
	QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
	CommitSync() (*types.ResponseCommit, error)
	InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
	BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
	EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
	ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
	OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
	LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
	ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
}
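
For orientation, here is a rough usage sketch (not taken from the repository) built against the non-context flavor of the interface above; the address is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

func main() {
	// Placeholder address; "socket" selects the Unix/TCP socket client.
	cli, err := abcicli.NewClient("tcp://127.0.0.1:26658", "socket", true)
	if err != nil {
		log.Fatal(err)
	}
	if err := cli.Start(); err != nil {
		log.Fatal(err)
	}
	defer cli.Stop()

	// Synchronous round trip through the ABCI connection.
	res, err := cli.EchoSync("ping")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(res.Message)
}
```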
//----------------------------------------

@@ -80,20 +74,22 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err

	return
}

//----------------------------------------

type Callback func(*types.Request, *types.Response)

//----------------------------------------

type ReqRes struct {
	*types.Request
	*sync.WaitGroup
	*types.Response // Not set atomically, so be sure to use WaitGroup.

	mtx  tmsync.Mutex
	done bool                  // Gets set to true once *after* WaitGroup.Done().
	cb   func(*types.Response) // A single callback that may be set.
	mtx tmsync.Mutex

	// callbackInvoked is used to track whether the callback was already
	// invoked during the regular execution of the request. This variable
	// allows clients to set the callback simultaneously without potentially
	// invoking the callback twice by accident, once when 'SetCallback' is
	// called and once during the normal request.
	callbackInvoked bool
	cb              func(*types.Response) // A single callback that may be set.
}

func NewReqRes(req *types.Request) *ReqRes {

@@ -102,39 +98,49 @@ func NewReqRes(req *types.Request) *ReqRes {

		WaitGroup: waitGroup1(),
		Response:  nil,

		done: false,
		cb:   nil,
		callbackInvoked: false,
		cb:              nil,
	}
}

// Sets the callback for this ReqRes atomically.
// If reqRes is already done, calls cb immediately.
// NOTE: reqRes.cb should not change if reqRes.done.
// NOTE: only one callback is supported.
func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
	reqRes.mtx.Lock()
// SetCallback sets the callback. If reqRes is already done, it will call the cb
// immediately. Note, reqRes.cb should not change if reqRes.done and only one
// callback is supported.
func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
	r.mtx.Lock()

	if reqRes.done {
		reqRes.mtx.Unlock()
		cb(reqRes.Response)
	if r.callbackInvoked {
		r.mtx.Unlock()
		cb(r.Response)
		return
	}

	reqRes.cb = cb
	reqRes.mtx.Unlock()
	r.cb = cb
	r.mtx.Unlock()
}

func (reqRes *ReqRes) GetCallback() func(*types.Response) {
	reqRes.mtx.Lock()
	defer reqRes.mtx.Unlock()
	return reqRes.cb
// InvokeCallback invokes a thread-safe execution of the configured callback
// if non-nil.
func (r *ReqRes) InvokeCallback() {
	r.mtx.Lock()
	defer r.mtx.Unlock()

	if r.cb != nil {
		r.cb(r.Response)
	}
	r.callbackInvoked = true
}

// NOTE: it should be safe to read reqRes.cb without locks after this.
func (reqRes *ReqRes) SetDone() {
	reqRes.mtx.Lock()
	reqRes.done = true
	reqRes.mtx.Unlock()
// GetCallback returns the configured callback of the ReqRes object which may be
// nil. Note, it is not safe to concurrently call this in cases where it is
// marked done and SetCallback is called before calling GetCallback as that
// will invoke the callback twice and create a potential race condition.
//
// ref: https://github.com/tendermint/tendermint/issues/5439
func (r *ReqRes) GetCallback() func(*types.Response) {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.cb
}

func waitGroup1() (wg *sync.WaitGroup) {
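
To make the callback semantics above concrete, here is a rough, hypothetical usage sketch: the caller fires an async request and registers a completion callback, relying on `SetCallback` invoking it immediately when the response has already been processed:

```go
package main

import (
	"fmt"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Placeholder address, as in the earlier sketch.
	cli, err := abcicli.NewClient("tcp://127.0.0.1:26658", "socket", true)
	if err != nil {
		log.Fatal(err)
	}
	if err := cli.Start(); err != nil {
		log.Fatal(err)
	}
	defer cli.Stop()

	// Fire an async request, then register a completion callback.
	reqRes := cli.CheckTxAsync(types.RequestCheckTx{Tx: []byte("placeholder-tx")})
	reqRes.SetCallback(func(res *types.Response) {
		// Runs exactly once: immediately if the response has already been
		// processed (callbackInvoked is true), otherwise later when the
		// client calls InvokeCallback after ordering the response.
		fmt.Println("CheckTx code:", res.GetCheckTx().Code)
	})

	// Flush to ensure buffered async requests are actually sent.
	if err := cli.FlushSync(); err != nil {
		log.Fatal(err)
	}
}
```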
@@ -1,29 +0,0 @@

// Package abcicli provides an ABCI implementation in Go.
//
// There are 3 clients available:
//   1. socket (unix or TCP)
//   2. local (in memory)
//   3. gRPC
//
// ## Socket client
//
// async: the client maintains an internal buffer of a fixed size. when the
// buffer becomes full, all Async calls will return an error immediately.
//
// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the
// Flush requests 3) waiting for the Flush response
//
// ## Local client
//
// async: global mutex is locked during each call (meaning it's not really async!)
// sync: global mutex is locked during each call
//
// ## gRPC client
//
// async: gRPC is synchronous, but an internal buffer of a fixed size is used
// to store responses and later call callbacks (separate goroutine per
// response).
//
// sync: waits for all Async calls to complete (essentially what Flush does in
// the socket client) and calls Sync method.
package abcicli
@@ -1,12 +1,12 @@

package abcicli

import (
	"context"
	"fmt"
	"net"
	"sync"
	"time"

	"golang.org/x/net/context"
	"google.golang.org/grpc"

	"github.com/tendermint/tendermint/abci/types"

@@ -15,7 +15,10 @@ import (

	tmsync "github.com/tendermint/tendermint/libs/sync"
)
// A gRPC client.
var _ Client = (*grpcClient)(nil)

// A stripped copy of the remoteClient that makes
// synchronous calls using grpc
type grpcClient struct {
	service.BaseService
	mustConnect bool

@@ -30,18 +33,6 @@ type grpcClient struct {

	resCb func(*types.Request, *types.Response) // listens to all callbacks
}

var _ Client = (*grpcClient)(nil)

// NewGRPCClient creates a gRPC client, which will connect to addr upon the
// start. Note Client#Start returns an error if connection is unsuccessful and
// mustConnect is true.
//
// GRPC calls are synchronous, but some callbacks expect to be called
// asynchronously (eg. the mempool expects to be able to lock to remove bad txs
// from cache). To accommodate, we finish each call in its own go-routine,
// which is expensive, but easy - if you want something better, use the socket
// protocol! maybe one day, if people really want it, we use grpc streams, but
// hopefully not :D
func NewGRPCClient(addr string, mustConnect bool) Client {
	cli := &grpcClient{
		addr: addr,

@@ -63,6 +54,10 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {

}
func (cli *grpcClient) OnStart() error {
	if err := cli.BaseService.OnStart(); err != nil {
		return err
	}

	// This processes asynchronous request/response messages and dispatches
	// them to callbacks.
	go func() {

@@ -71,7 +66,6 @@ func (cli *grpcClient) OnStart() error {

		cli.mtx.Lock()
		defer cli.mtx.Unlock()

		reqres.SetDone()
		reqres.Done()

		// Notify client listener if set

@@ -80,9 +74,7 @@ func (cli *grpcClient) OnStart() error {

		}

		// Notify reqRes listener if set
		if cb := reqres.GetCallback(); cb != nil {
			cb(reqres.Response)
		}
		reqres.InvokeCallback()
	}
	for reqres := range cli.chReqRes {
		if reqres != nil {

@@ -95,6 +87,7 @@ func (cli *grpcClient) OnStart() error {

RETRY_LOOP:
	for {
		//nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
		if err != nil {
			if cli.mustConnect {

@@ -125,6 +118,8 @@ RETRY_LOOP:

	}

func (cli *grpcClient) OnStop() {
	cli.BaseService.OnStop()

	if cli.conn != nil {
		cli.conn.Close()
	}

@@ -132,11 +127,11 @@ func (cli *grpcClient) OnStop() {

}

func (cli *grpcClient) StopForError(err error) {
	cli.mtx.Lock()
	if !cli.IsRunning() {
		return
	}

	cli.mtx.Lock()
	if cli.err == nil {
		cli.err = err
	}
@@ -163,168 +158,155 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {

}

//----------------------------------------
// GRPC calls are synchronous, but some callbacks expect to be called asynchronously
// (eg. the mempool expects to be able to lock to remove bad txs from cache).
// To accommodate, we finish each call in its own go-routine,
// which is expensive, but easy - if you want something better, use the socket protocol!
// maybe one day, if people really want it, we use grpc streams,
// but hopefully not :D

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
	req := types.ToRequestEcho(msg)
	res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
	res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
func (cli *grpcClient) FlushAsync() *ReqRes {
	req := types.ToRequestFlush()
	res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true))
	res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
	req := types.ToRequestInfo(params)
	res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
	res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
	req := types.ToRequestSetOption(params)
	res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
	if err != nil {
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
}

func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
	req := types.ToRequestDeliverTx(params)
	res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
	res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
	req := types.ToRequestCheckTx(params)
	res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true))
	res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
	req := types.ToRequestQuery(params)
	res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
	res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
func (cli *grpcClient) CommitAsync() *ReqRes {
	req := types.ToRequestCommit()
	res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
	res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
	req := types.ToRequestInitChain(params)
	res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
	res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
	req := types.ToRequestBeginBlock(params)
	res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
	req := types.ToRequestEndBlock(params)
	res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
func (cli *grpcClient) ListSnapshotsAsync(params types.RequestListSnapshots) *ReqRes {
	req := types.ToRequestListSnapshots(params)
	res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
	res, err := cli.client.ListSnapshots(context.Background(), req.GetListSnapshots(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
func (cli *grpcClient) OfferSnapshotAsync(params types.RequestOfferSnapshot) *ReqRes {
	req := types.ToRequestOfferSnapshot(params)
	res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
	res, err := cli.client.OfferSnapshot(context.Background(), req.GetOfferSnapshot(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) LoadSnapshotChunkAsync(
	ctx context.Context,
	params types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
func (cli *grpcClient) LoadSnapshotChunkAsync(params types.RequestLoadSnapshotChunk) *ReqRes {
	req := types.ToRequestLoadSnapshotChunk(params)
	res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
	res, err := cli.client.LoadSnapshotChunk(context.Background(), req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
}

// NOTE: call is synchronous, use ctx to break early if needed
func (cli *grpcClient) ApplySnapshotChunkAsync(
	ctx context.Context,
	params types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshotChunk) *ReqRes {
	req := types.ToRequestApplySnapshotChunk(params)
	res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
	res, err := cli.client.ApplySnapshotChunk(context.Background(), req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
	if err != nil {
		return nil, err
		cli.StopForError(err)
	}
	return cli.finishAsyncCall(
		ctx,
		req,
		&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
	)
	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
}

// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
// with the response. We don't complete it until it's been ordered via the channel.
func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
	reqres := NewReqRes(req)
	reqres.Response = res
	select {
	case cli.chReqRes <- reqres: // use channel for async responses, since they must be ordered
		return reqres, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
	cli.chReqRes <- reqres // use channel for async responses, since they must be ordered
	return reqres
}
// finishSyncCall waits for an async call to complete. It is necessary to call all
|
||||
@@ -357,150 +339,81 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *grpcClient) FlushSync(ctx context.Context) error {
|
||||
return nil
|
||||
func (cli *grpcClient) FlushSync() error {
|
||||
reqres := cli.FlushAsync()
|
||||
cli.finishSyncCall(reqres).GetFlush()
|
||||
return cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqres, err := cli.EchoAsync(ctx, msg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
reqres := cli.EchoAsync(msg)
|
||||
// StopForError should already have been called if error is set
|
||||
return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InfoSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInfo,
|
||||
) (*types.ResponseInfo, error) {
|
||||
reqres, err := cli.InfoAsync(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
reqres := cli.InfoAsync(req)
|
||||
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) DeliverTxSync(
|
||||
ctx context.Context,
|
||||
params types.RequestDeliverTx,
|
||||
) (*types.ResponseDeliverTx, error) {
|
||||
func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
reqres := cli.SetOptionAsync(req)
|
||||
return reqres.Response.GetSetOption(), cli.Error()
|
||||
}
|
||||
|
||||
reqres, err := cli.DeliverTxAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
reqres := cli.DeliverTxAsync(params)
|
||||
 	return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
 }

-func (cli *grpcClient) CheckTxSync(
-	ctx context.Context,
-	params types.RequestCheckTx,
-) (*types.ResponseCheckTx, error) {
-
-	reqres, err := cli.CheckTxAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
+func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+	reqres := cli.CheckTxAsync(params)
 	return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
 }

-func (cli *grpcClient) QuerySync(
-	ctx context.Context,
-	req types.RequestQuery,
-) (*types.ResponseQuery, error) {
-	reqres, err := cli.QueryAsync(ctx, req)
-	if err != nil {
-		return nil, err
-	}
+func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
+	reqres := cli.QueryAsync(req)
 	return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
 }

-func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
-	reqres, err := cli.CommitAsync(ctx)
-	if err != nil {
-		return nil, err
-	}
+func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) {
+	reqres := cli.CommitAsync()
 	return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
 }

-func (cli *grpcClient) InitChainSync(
-	ctx context.Context,
-	params types.RequestInitChain,
-) (*types.ResponseInitChain, error) {
-
-	reqres, err := cli.InitChainAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
+func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) {
+	reqres := cli.InitChainAsync(params)
 	return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
 }

-func (cli *grpcClient) BeginBlockSync(
-	ctx context.Context,
-	params types.RequestBeginBlock,
-) (*types.ResponseBeginBlock, error) {
-
-	reqres, err := cli.BeginBlockAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
+func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
+	reqres := cli.BeginBlockAsync(params)
 	return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
 }

-func (cli *grpcClient) EndBlockSync(
-	ctx context.Context,
-	params types.RequestEndBlock,
-) (*types.ResponseEndBlock, error) {
-
-	reqres, err := cli.EndBlockAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
+func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) {
+	reqres := cli.EndBlockAsync(params)
 	return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
 }

-func (cli *grpcClient) ListSnapshotsSync(
-	ctx context.Context,
-	params types.RequestListSnapshots,
-) (*types.ResponseListSnapshots, error) {
-
-	reqres, err := cli.ListSnapshotsAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
+func (cli *grpcClient) ListSnapshotsSync(params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
+	reqres := cli.ListSnapshotsAsync(params)
 	return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
 }

-func (cli *grpcClient) OfferSnapshotSync(
-	ctx context.Context,
-	params types.RequestOfferSnapshot,
-) (*types.ResponseOfferSnapshot, error) {
-
-	reqres, err := cli.OfferSnapshotAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
+func (cli *grpcClient) OfferSnapshotSync(params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
+	reqres := cli.OfferSnapshotAsync(params)
 	return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
 }

 func (cli *grpcClient) LoadSnapshotChunkSync(
-	ctx context.Context,
 	params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
-
-	reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
+	reqres := cli.LoadSnapshotChunkAsync(params)
 	return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
 }

 func (cli *grpcClient) ApplySnapshotChunkSync(
-	ctx context.Context,
 	params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
-
-	reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
+	reqres := cli.ApplySnapshotChunkAsync(params)
 	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
 }
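For orientation: every Sync method above is a thin wrapper that queues the request through the matching Async call, blocks until the response arrives, and then reports the client's sticky error. The following is a minimal, self-contained sketch of that sync-over-async shape; the names (client, reqRes, echoAsync) are illustrative only, not the tendermint API.

package main

import (
	"fmt"
	"sync"
)

// reqRes is a tiny future: wait() blocks until the response is filled in.
type reqRes struct {
	wg       sync.WaitGroup
	response string
}

func (r *reqRes) wait() string {
	r.wg.Wait()
	return r.response
}

type client struct {
	mtx sync.Mutex
	err error // sticky: the first transport error, surfaced by Error()
}

func (c *client) Error() error {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	return c.err
}

// echoAsync queues the request and returns a future immediately.
func (c *client) echoAsync(msg string) *reqRes {
	rr := &reqRes{}
	rr.wg.Add(1)
	go func() {
		rr.response = msg // stand-in for the real round trip
		rr.wg.Done()
	}()
	return rr
}

// echoSync blocks on the async result and then reports the sticky error,
// mirroring the "finishSyncCall(reqres).GetX(), cli.Error()" pattern above.
func (c *client) echoSync(msg string) (string, error) {
	rr := c.echoAsync(msg)
	return rr.wait(), c.Error()
}

func main() {
	c := &client{}
	res, err := c.echoSync("hello")
	fmt.Println(res, err)
}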
@@ -1,13 +1,13 @@
 package abcicli

 import (
-	"context"
-
 	types "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/service"
 	tmsync "github.com/tendermint/tendermint/libs/sync"
 )

 var _ Client = (*localClient)(nil)

 // NOTE: use defer to unlock mutex because Application might panic (e.g., in
 // case of malicious tx or query). It only makes sense for publicly exposed
 // methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
@@ -49,22 +49,22 @@ func (app *localClient) Error() error {
 	return nil
 }

-func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
+func (app *localClient) FlushAsync() *ReqRes {
 	// Do nothing
-	return newLocalReqRes(types.ToRequestFlush(), nil), nil
+	return newLocalReqRes(types.ToRequestFlush(), nil)
 }

-func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
+func (app *localClient) EchoAsync(msg string) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

 	return app.callback(
 		types.ToRequestEcho(msg),
 		types.ToResponseEcho(msg),
-	), nil
+	)
 }

-func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
+func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -72,10 +72,21 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
 	return app.callback(
 		types.ToRequestInfo(req),
 		types.ToResponseInfo(res),
-	), nil
+	)
 }

-func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
+func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.SetOption(req)
+	return app.callback(
+		types.ToRequestSetOption(req),
+		types.ToResponseSetOption(res),
+	)
+}
+
+func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -83,10 +94,10 @@ func (app *localClient) DeliverTxAsync(ctx context.Context, params types.Request
 	return app.callback(
 		types.ToRequestDeliverTx(params),
 		types.ToResponseDeliverTx(res),
-	), nil
+	)
 }

-func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
+func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -94,10 +105,10 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
 	return app.callback(
 		types.ToRequestCheckTx(req),
 		types.ToResponseCheckTx(res),
-	), nil
+	)
 }

-func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
+func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -105,10 +116,10 @@ func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery)
 	return app.callback(
 		types.ToRequestQuery(req),
 		types.ToResponseQuery(res),
-	), nil
+	)
 }

-func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
+func (app *localClient) CommitAsync() *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -116,10 +127,10 @@ func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
 	return app.callback(
 		types.ToRequestCommit(),
 		types.ToResponseCommit(res),
-	), nil
+	)
 }

-func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
+func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -127,10 +138,10 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
 	return app.callback(
 		types.ToRequestInitChain(req),
 		types.ToResponseInitChain(res),
-	), nil
+	)
 }

-func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
+func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -138,10 +149,10 @@ func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBe
 	return app.callback(
 		types.ToRequestBeginBlock(req),
 		types.ToResponseBeginBlock(res),
-	), nil
+	)
 }

-func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
+func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -149,10 +160,10 @@ func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndB
 	return app.callback(
 		types.ToRequestEndBlock(req),
 		types.ToResponseEndBlock(res),
-	), nil
+	)
 }

-func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
+func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -160,10 +171,10 @@ func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.Reques
 	return app.callback(
 		types.ToRequestListSnapshots(req),
 		types.ToResponseListSnapshots(res),
-	), nil
+	)
 }

-func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
+func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -171,13 +182,10 @@ func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.Reques
 	return app.callback(
 		types.ToRequestOfferSnapshot(req),
 		types.ToResponseOfferSnapshot(res),
-	), nil
+	)
 }

-func (app *localClient) LoadSnapshotChunkAsync(
-	ctx context.Context,
-	req types.RequestLoadSnapshotChunk,
-) (*ReqRes, error) {
+func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -185,13 +193,10 @@ func (app *localClient) LoadSnapshotChunkAsync(
 	return app.callback(
 		types.ToRequestLoadSnapshotChunk(req),
 		types.ToResponseLoadSnapshotChunk(res),
-	), nil
+	)
 }

-func (app *localClient) ApplySnapshotChunkAsync(
-	ctx context.Context,
-	req types.RequestApplySnapshotChunk,
-) (*ReqRes, error) {
+func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -199,20 +204,20 @@ func (app *localClient) ApplySnapshotChunkAsync(
 	return app.callback(
 		types.ToRequestApplySnapshotChunk(req),
 		types.ToResponseApplySnapshotChunk(res),
-	), nil
+	)
 }

 //-------------------------------------------------------

-func (app *localClient) FlushSync(ctx context.Context) error {
+func (app *localClient) FlushSync() error {
 	return nil
 }

-func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
 	return &types.ResponseEcho{Message: msg}, nil
 }

-func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
+func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -220,11 +225,15 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
 	return &res, nil
 }

-func (app *localClient) DeliverTxSync(
-	ctx context.Context,
-	req types.RequestDeliverTx,
-) (*types.ResponseDeliverTx, error) {
+func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.SetOption(req)
+	return &res, nil
+}
+
+func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -232,10 +241,7 @@ func (app *localClient) DeliverTxSync(
 	return &res, nil
 }

-func (app *localClient) CheckTxSync(
-	ctx context.Context,
-	req types.RequestCheckTx,
-) (*types.ResponseCheckTx, error) {
+func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -243,10 +249,7 @@ func (app *localClient) CheckTxSync(
 	return &res, nil
 }

-func (app *localClient) QuerySync(
-	ctx context.Context,
-	req types.RequestQuery,
-) (*types.ResponseQuery, error) {
+func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -254,7 +257,7 @@ func (app *localClient) QuerySync(
 	return &res, nil
 }

-func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
+func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -262,11 +265,7 @@ func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit,
 	return &res, nil
 }

-func (app *localClient) InitChainSync(
-	ctx context.Context,
-	req types.RequestInitChain,
-) (*types.ResponseInitChain, error) {
-
+func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -274,11 +273,7 @@ func (app *localClient) InitChainSync(
 	return &res, nil
 }

-func (app *localClient) BeginBlockSync(
-	ctx context.Context,
-	req types.RequestBeginBlock,
-) (*types.ResponseBeginBlock, error) {
-
+func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -286,11 +281,7 @@ func (app *localClient) BeginBlockSync(
 	return &res, nil
 }

-func (app *localClient) EndBlockSync(
-	ctx context.Context,
-	req types.RequestEndBlock,
-) (*types.ResponseEndBlock, error) {
-
+func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -298,11 +289,7 @@ func (app *localClient) EndBlockSync(
 	return &res, nil
 }

-func (app *localClient) ListSnapshotsSync(
-	ctx context.Context,
-	req types.RequestListSnapshots,
-) (*types.ResponseListSnapshots, error) {
-
+func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -310,11 +297,7 @@ func (app *localClient) ListSnapshotsSync(
 	return &res, nil
 }

-func (app *localClient) OfferSnapshotSync(
-	ctx context.Context,
-	req types.RequestOfferSnapshot,
-) (*types.ResponseOfferSnapshot, error) {
-
+func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -323,9 +306,7 @@ func (app *localClient) OfferSnapshotSync(
 }

 func (app *localClient) LoadSnapshotChunkSync(
-	ctx context.Context,
 	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
-
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -334,9 +315,7 @@ func (app *localClient) LoadSnapshotChunkSync(
 }

 func (app *localClient) ApplySnapshotChunkSync(
-	ctx context.Context,
 	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
-
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -348,12 +327,13 @@ func (app *localClient) ApplySnapshotChunkSync(

 func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
 	app.Callback(req, res)
-	return newLocalReqRes(req, res)
+	rr := newLocalReqRes(req, res)
+	rr.callbackInvoked = true
+	return rr
 }

 func newLocalReqRes(req *types.Request, res *types.Response) *ReqRes {
 	reqRes := NewReqRes(req)
 	reqRes.Response = res
 	reqRes.SetDone()
 	return reqRes
 }
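The local client above is "async" in name only: it calls the application inline while holding a single mutex, then hands back a request/response pair that is already complete, which is why newLocalReqRes marks the ReqRes done before returning it. A minimal, self-contained sketch of that idea follows; the Application/localClient names here are illustrative stand-ins, not the tendermint types.

package main

import (
	"fmt"
	"sync"
)

// Application is a stand-in for the in-process ABCI application.
type Application interface {
	Echo(string) string
}

type echoApp struct{}

func (echoApp) Echo(s string) string { return s }

// localClient serializes every application call behind one mutex.
type localClient struct {
	mtx sync.Mutex
	app Application
}

// echoAsync runs the application synchronously under the lock; the result
// is already available when it returns, mirroring the pattern above.
func (c *localClient) echoAsync(msg string) string {
	c.mtx.Lock()
	// defer the unlock: the application might panic on malicious input,
	// and the lock must still be released.
	defer c.mtx.Unlock()
	return c.app.Echo(msg)
}

func main() {
	c := &localClient{app: echoApp{}}
	fmt.Println(c.echoAsync("hi"))
}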
@@ -1,12 +1,9 @@
-// Code generated by mockery v2.3.0. DO NOT EDIT.
+// Code generated by mockery v1.1.1. DO NOT EDIT.

 package mocks

 import (
-	context "context"
-
 	abcicli "github.com/tendermint/tendermint/abci/client"

 	log "github.com/tendermint/tendermint/libs/log"

 	mock "github.com/stretchr/testify/mock"
@@ -19,36 +16,29 @@ type Client struct {
 	mock.Mock
 }

-// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// ApplySnapshotChunkAsync provides a mock function with given fields: _a0
+func (_m *Client) ApplySnapshotChunkAsync(_a0 types.RequestApplySnapshotChunk) *abcicli.ReqRes {
+	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
-	ret := _m.Called(_a0, _a1)
+// ApplySnapshotChunkSync provides a mock function with given fields: _a0
+func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseApplySnapshotChunk
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
@@ -56,8 +46,8 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
-		r1 = rf(_a0, _a1)
+	if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok {
+		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -65,36 +55,29 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
 	return r0, r1
 }

-// BeginBlockAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// BeginBlockAsync provides a mock function with given fields: _a0
+func (_m *Client) BeginBlockAsync(_a0 types.RequestBeginBlock) *abcicli.ReqRes {
+	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// BeginBlockSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
-	ret := _m.Called(_a0, _a1)
+// BeginBlockSync provides a mock function with given fields: _a0
+func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseBeginBlock
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseBeginBlock)
@@ -102,8 +85,8 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
-		r1 = rf(_a0, _a1)
+	if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok {
+		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -111,36 +94,29 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
 	return r0, r1
 }

-// CheckTxAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// CheckTxAsync provides a mock function with given fields: _a0
+func (_m *Client) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes {
+	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// CheckTxSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
-	ret := _m.Called(_a0, _a1)
+// CheckTxSync provides a mock function with given fields: _a0
+func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseCheckTx
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseCheckTx)
@@ -148,30 +124,7 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// CommitAsync provides a mock function with given fields: _a0
-func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0)
-
-	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
-		r0 = rf(_a0)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*abcicli.ReqRes)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+	if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok {
 		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
@@ -180,13 +133,29 @@ func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
 	return r0, r1
 }

-// CommitSync provides a mock function with given fields: _a0
-func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
-	ret := _m.Called(_a0)
+// CommitAsync provides a mock function with given fields:
+func (_m *Client) CommitAsync() *abcicli.ReqRes {
+	ret := _m.Called()
+
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
+	}
+
+	return r0
+}
+
+// CommitSync provides a mock function with given fields:
+func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
+	ret := _m.Called()

 	var r0 *types.ResponseCommit
-	if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
-		r0 = rf(_a0)
+	if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok {
+		r0 = rf()
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseCommit)
@@ -194,7 +163,46 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
-		r1 = rf(_a0)
+	if rf, ok := ret.Get(1).(func() error); ok {
+		r1 = rf()
 	} else {
 		r1 = ret.Error(1)
 	}

 	return r0, r1
 }

+// DeliverTxAsync provides a mock function with given fields: _a0
+func (_m *Client) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes {
+	ret := _m.Called(_a0)
+
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
+	}
+
+	return r0
+}
+
+// DeliverTxSync provides a mock function with given fields: _a0
+func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
+	ret := _m.Called(_a0)
+
+	var r0 *types.ResponseDeliverTx
+	if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
+		r0 = rf(_a0)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseDeliverTx)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(types.RequestDeliverTx) error); ok {
+		r1 = rf(_a0)
+	} else {
+		r1 = ret.Error(1)
@@ -203,82 +211,29 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
 	return r0, r1
 }

-// DeliverTxAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// EchoAsync provides a mock function with given fields: msg
+func (_m *Client) EchoAsync(msg string) *abcicli.ReqRes {
+	ret := _m.Called(msg)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(string) *abcicli.ReqRes); ok {
+		r0 = rf(msg)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// DeliverTxSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *types.ResponseDeliverTx
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseDeliverTx)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// EchoAsync provides a mock function with given fields: ctx, msg
-func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
-	ret := _m.Called(ctx, msg)
-
-	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok {
-		r0 = rf(ctx, msg)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*abcicli.ReqRes)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
-		r1 = rf(ctx, msg)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// EchoSync provides a mock function with given fields: ctx, msg
-func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
-	ret := _m.Called(ctx, msg)
+// EchoSync provides a mock function with given fields: msg
+func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) {
+	ret := _m.Called(msg)

 	var r0 *types.ResponseEcho
-	if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
-		r0 = rf(ctx, msg)
+	if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok {
+		r0 = rf(msg)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseEcho)
@@ -286,8 +241,8 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
-		r1 = rf(ctx, msg)
+	if rf, ok := ret.Get(1).(func(string) error); ok {
+		r1 = rf(msg)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -295,36 +250,29 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
 	return r0, r1
 }

-// EndBlockAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// EndBlockAsync provides a mock function with given fields: _a0
+func (_m *Client) EndBlockAsync(_a0 types.RequestEndBlock) *abcicli.ReqRes {
+	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// EndBlockSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
-	ret := _m.Called(_a0, _a1)
+// EndBlockSync provides a mock function with given fields: _a0
+func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseEndBlock
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseEndBlock)
@@ -332,8 +280,8 @@ func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
-		r1 = rf(_a0, _a1)
+	if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok {
+		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -355,12 +303,42 @@ func (_m *Client) Error() error {
 	return r0
 }

-// FlushAsync provides a mock function with given fields: _a0
-func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
+// FlushAsync provides a mock function with given fields:
+func (_m *Client) FlushAsync() *abcicli.ReqRes {
+	ret := _m.Called()
+
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
+		r0 = rf()
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
+	}
+
+	return r0
+}
+
+// FlushSync provides a mock function with given fields:
+func (_m *Client) FlushSync() error {
+	ret := _m.Called()
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func() error); ok {
+		r0 = rf()
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// InfoAsync provides a mock function with given fields: _a0
+func (_m *Client) InfoAsync(_a0 types.RequestInfo) *abcicli.ReqRes {
 	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(types.RequestInfo) *abcicli.ReqRes); ok {
 		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
@@ -368,8 +346,24 @@ func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
 		}
 	}

+	return r0
+}
+
+// InfoSync provides a mock function with given fields: _a0
+func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) {
+	ret := _m.Called(_a0)
+
+	var r0 *types.ResponseInfo
+	if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok {
+		r0 = rf(_a0)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseInfo)
+		}
+	}
+
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+	if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok {
 		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
@@ -378,96 +372,29 @@ func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
 	return r0, r1
 }

-// FlushSync provides a mock function with given fields: _a0
-func (_m *Client) FlushSync(_a0 context.Context) error {
+// InitChainAsync provides a mock function with given fields: _a0
+func (_m *Client) InitChainAsync(_a0 types.RequestInitChain) *abcicli.ReqRes {
 	ret := _m.Called(_a0)

-	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func(types.RequestInitChain) *abcicli.ReqRes); ok {
 		r0 = rf(_a0)
 	} else {
-		r0 = ret.Error(0)
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
 	}

 	return r0
 }

-// InfoAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*abcicli.ReqRes)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// InfoSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *types.ResponseInfo
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseInfo)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// InitChainAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*abcicli.ReqRes)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// InitChainSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
-	ret := _m.Called(_a0, _a1)
+// InitChainSync provides a mock function with given fields: _a0
+func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseInitChain
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseInitChain)
@@ -475,8 +402,8 @@ func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain)
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
-		r1 = rf(_a0, _a1)
+	if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok {
+		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -498,36 +425,29 @@ func (_m *Client) IsRunning() bool {
 	return r0
 }

-// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// ListSnapshotsAsync provides a mock function with given fields: _a0
+func (_m *Client) ListSnapshotsAsync(_a0 types.RequestListSnapshots) *abcicli.ReqRes {
+	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// ListSnapshotsSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
-	ret := _m.Called(_a0, _a1)
+// ListSnapshotsSync provides a mock function with given fields: _a0
+func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseListSnapshots
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseListSnapshots)
@@ -535,8 +455,8 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
-		r1 = rf(_a0, _a1)
+	if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok {
+		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -544,36 +464,29 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
 	return r0, r1
 }

-// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// LoadSnapshotChunkAsync provides a mock function with given fields: _a0
+func (_m *Client) LoadSnapshotChunkAsync(_a0 types.RequestLoadSnapshotChunk) *abcicli.ReqRes {
+	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
-	ret := _m.Called(_a0, _a1)
+// LoadSnapshotChunkSync provides a mock function with given fields: _a0
+func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseLoadSnapshotChunk
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
@@ -581,8 +494,8 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
-		r1 = rf(_a0, _a1)
+	if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok {
+		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -590,36 +503,29 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
 	return r0, r1
 }

-// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// OfferSnapshotAsync provides a mock function with given fields: _a0
+func (_m *Client) OfferSnapshotAsync(_a0 types.RequestOfferSnapshot) *abcicli.ReqRes {
+	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// OfferSnapshotSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
-	ret := _m.Called(_a0, _a1)
+// OfferSnapshotSync provides a mock function with given fields: _a0
+func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseOfferSnapshot
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
@@ -627,8 +533,8 @@ func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferS
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
-		r1 = rf(_a0, _a1)
+	if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok {
+		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -669,36 +575,29 @@ func (_m *Client) OnStop() {
 	_m.Called()
 }

-// QueryAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
+// QueryAsync provides a mock function with given fields: _a0
+func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
+	ret := _m.Called(_a0)

 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestQuery) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*abcicli.ReqRes)
 		}
 	}

-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
+	return r0
 }

-// QuerySync provides a mock function with given fields: _a0, _a1
-func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
-	ret := _m.Called(_a0, _a1)
+// QuerySync provides a mock function with given fields: _a0
+func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) {
+	ret := _m.Called(_a0)

 	var r0 *types.ResponseQuery
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
-		r0 = rf(_a0, _a1)
+	if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok {
+		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
 			r0 = ret.Get(0).(*types.ResponseQuery)
@@ -706,8 +605,8 @@ func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types
 	}

 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
-		r1 = rf(_a0, _a1)
+	if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok {
+		r1 = rf(_a0)
 	} else {
 		r1 = ret.Error(1)
 	}
@@ -750,6 +649,45 @@ func (_m *Client) SetLogger(_a0 log.Logger) {
 	_m.Called(_a0)
 }

+// SetOptionAsync provides a mock function with given fields: _a0
+func (_m *Client) SetOptionAsync(_a0 types.RequestSetOption) *abcicli.ReqRes {
+	ret := _m.Called(_a0)
+
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func(types.RequestSetOption) *abcicli.ReqRes); ok {
+		r0 = rf(_a0)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
+	}
+
+	return r0
+}
+
+// SetOptionSync provides a mock function with given fields: _a0
+func (_m *Client) SetOptionSync(_a0 types.RequestSetOption) (*types.ResponseSetOption, error) {
+	ret := _m.Called(_a0)
+
+	var r0 *types.ResponseSetOption
+	if rf, ok := ret.Get(0).(func(types.RequestSetOption) *types.ResponseSetOption); ok {
+		r0 = rf(_a0)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseSetOption)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(types.RequestSetOption) error); ok {
+		r1 = rf(_a0)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
 // SetResponseCallback provides a mock function with given fields: _a0
 func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
 	_m.Called(_a0)
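The generated mock above leans entirely on testify's mock.Mock: each method records the call with _m.Called and returns whatever canned values the test set up with On/Return. A minimal, self-contained sketch of that mechanism follows; the greeter type is a made-up example, not one of the tendermint mocks.

package main

import (
	"fmt"

	"github.com/stretchr/testify/mock"
)

// greeter embeds mock.Mock, exactly like the generated Client above.
type greeter struct {
	mock.Mock
}

// Greet records the call and returns the values configured via On/Return,
// the same shape as the generated mock methods.
func (g *greeter) Greet(name string) (string, error) {
	ret := g.Called(name)
	return ret.String(0), ret.Error(1)
}

func main() {
	g := &greeter{}
	// Configure the canned response that ret.String(0)/ret.Error(1) will see.
	g.On("Greet", "abci").Return("hello abci", nil)

	msg, err := g.Greet("abci")
	fmt.Println(msg, err)
}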
@@ -3,7 +3,6 @@ package abcicli
 import (
 	"bufio"
 	"container/list"
-	"context"
 	"errors"
 	"fmt"
 	"io"
@@ -19,18 +18,10 @@ import (
 )

 const (
-	// reqQueueSize is the max number of queued async requests.
-	// (memory: 256MB max assuming 1MB transactions)
-	reqQueueSize = 256
-	// Don't wait longer than...
-	flushThrottleMS = 20
+	reqQueueSize    = 256 // TODO make configurable
+	flushThrottleMS = 20  // Don't wait longer than...
 )

-type reqResWithContext struct {
-	R *ReqRes
-	C context.Context // if context.Err is not nil, reqRes will be thrown away (ignored)
-}
-
 // This is goroutine-safe, but users should beware that the application in
 // general is not meant to be interfaced with concurrent callers.
 type socketClient struct {
@@ -40,7 +31,7 @@ type socketClient struct {
 	mustConnect bool
 	conn        net.Conn

-	reqQueue   chan *reqResWithContext
+	reqQueue   chan *ReqRes
 	flushTimer *timer.ThrottleTimer

 	mtx     tmsync.Mutex
@@ -56,7 +47,7 @@ var _ Client = (*socketClient)(nil)
 // if it fails to connect.
 func NewSocketClient(addr string, mustConnect bool) Client {
 	cli := &socketClient{
-		reqQueue:    make(chan *reqResWithContext, reqQueueSize),
+		reqQueue:    make(chan *ReqRes, reqQueueSize),
 		flushTimer:  timer.NewThrottleTimer("socketClient", flushThrottleMS),
 		mustConnect: mustConnect,
@@ -132,20 +123,15 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
 		case reqres := <-cli.reqQueue:
 			// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)

-			if reqres.C.Err() != nil {
-				cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
-				continue
-			}
-
-			cli.willSendReq(reqres.R)
-			err := types.WriteMessage(reqres.R.Request, w)
+			cli.willSendReq(reqres)
+			err := types.WriteMessage(reqres.Request, w)
 			if err != nil {
 				cli.stopForError(fmt.Errorf("write to buffer: %w", err))
 				return
 			}

 			// If it's a flush request, flush the current buffer.
-			if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
+			if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
 				err = w.Flush()
 				if err != nil {
 					cli.stopForError(fmt.Errorf("flush buffer: %w", err))
@@ -154,7 +140,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
 			}
 		case <-cli.flushTimer.Ch: // flush queue
 			select {
-			case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
+			case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
 			default:
 				// Probably will fill the buffer, or retry later.
 			}
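The send loop above drains a buffered request queue and, on a throttle tick, enqueues a flush without blocking if the queue is already full. A minimal, self-contained sketch of that queue-and-flush shape (using a plain time.Ticker in place of the ThrottleTimer; all names are illustrative):

package main

import (
	"fmt"
	"time"
)

type request struct {
	kind string
}

func main() {
	// Buffered queue of pending requests, like reqQueue above.
	reqQueue := make(chan *request, 256)
	// Stand-in for the 20ms flush throttle.
	throttle := time.NewTicker(20 * time.Millisecond)
	defer throttle.Stop()

	reqQueue <- &request{kind: "echo"}

	for i := 0; i < 3; i++ {
		select {
		case req := <-reqQueue:
			fmt.Println("send", req.kind)
			if req.kind == "flush" {
				fmt.Println("flush write buffer")
			}
		case <-throttle.C:
			// Non-blocking enqueue: if the queue is full, skip and
			// let a later tick retry, as the default branch does above.
			select {
			case reqQueue <- &request{kind: "flush"}:
			default:
			}
		}
	}
}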
@@ -226,273 +212,218 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
//
|
||||
// NOTE: It is possible this callback isn't set on the reqres object. At this
|
||||
// point, in which case it will be called after, when it is set.
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(res)
|
||||
}
|
||||
reqres.InvokeCallback()
|
||||
|
||||
return nil
|
||||
}

//----------------------------------------

func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
func (cli *socketClient) EchoAsync(msg string) *ReqRes {
    return cli.queueRequest(types.ToRequestEcho(msg))
}

func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestFlush())
func (cli *socketClient) FlushAsync() *ReqRes {
    return cli.queueRequest(types.ToRequestFlush())
}

func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
    return cli.queueRequest(types.ToRequestInfo(req))
}

func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
    return cli.queueRequest(types.ToRequestSetOption(req))
}

func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
    return cli.queueRequest(types.ToRequestDeliverTx(req))
}

func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
    return cli.queueRequest(types.ToRequestCheckTx(req))
}

func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestCommit())
func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
    return cli.queueRequest(types.ToRequestQuery(req))
}

func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
func (cli *socketClient) CommitAsync() *ReqRes {
    return cli.queueRequest(types.ToRequestCommit())
}

func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
    return cli.queueRequest(types.ToRequestInitChain(req))
}

func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
    return cli.queueRequest(types.ToRequestBeginBlock(req))
}

func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
    return cli.queueRequest(types.ToRequestEndBlock(req))
}

func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
func (cli *socketClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
    return cli.queueRequest(types.ToRequestListSnapshots(req))
}

func (cli *socketClient) LoadSnapshotChunkAsync(
    ctx context.Context,
    req types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
func (cli *socketClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
    return cli.queueRequest(types.ToRequestOfferSnapshot(req))
}

func (cli *socketClient) ApplySnapshotChunkAsync(
    ctx context.Context,
    req types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
func (cli *socketClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
    return cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
}

func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
    return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
}
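
A minimal usage sketch for the non-context async API above (the helper name and flow are illustrative, not part of the diff): enqueue a request, register a ReqRes callback, and flush so the request actually crosses the socket.

package main

import (
    abcicli "github.com/tendermint/tendermint/abci/client"
    "github.com/tendermint/tendermint/abci/types"
)

// checkTxAndWait is a hypothetical helper: nothing is written to the
// socket until a flush, so the callback only fires after FlushSync.
func checkTxAndWait(cli abcicli.Client, tx []byte) (*types.ResponseCheckTx, error) {
    done := make(chan struct{})
    reqRes := cli.CheckTxAsync(types.RequestCheckTx{Tx: tx})
    reqRes.SetCallback(func(_ *types.Response) { close(done) })
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    <-done
    return reqRes.Response.GetCheckTx(), cli.Error()
}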

//----------------------------------------

func (cli *socketClient) FlushSync(ctx context.Context) error {
    reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
    if err != nil {
        return queueErr(err)
    }

func (cli *socketClient) FlushSync() error {
    reqRes := cli.queueRequest(types.ToRequestFlush())
    if err := cli.Error(); err != nil {
        return err
    }

    gotResp := make(chan struct{})
    go func() {
        // NOTE: if we don't flush the queue, it's possible to get stuck here
        reqRes.Wait()
        close(gotResp)
    }()

    select {
    case <-gotResp:
        return cli.Error()
    case <-ctx.Done():
        return ctx.Err()
    }
    reqRes.Wait() // NOTE: if we don't flush the queue, it's possible to get stuck here
    return cli.Error()
}
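
The context-aware FlushSync above relies on a common pattern: an uninterruptible Wait() is pushed into a goroutine so the caller can still bail out on context cancellation. A self-contained sketch of just that pattern (note the goroutine itself keeps blocking until wait returns; only the caller is released early):

package main

import "context"

// waitCtx waits for an uninterruptible wait function, but lets the
// caller give up when the context is cancelled.
func waitCtx(ctx context.Context, wait func()) error {
    done := make(chan struct{})
    go func() {
        wait() // e.g. reqRes.Wait()
        close(done)
    }()
    select {
    case <-done:
        return nil
    case <-ctx.Done():
        return ctx.Err()
    }
}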

func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
    if err != nil {
func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
    reqres := cli.queueRequest(types.ToRequestEcho(msg))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetEcho(), nil

    return reqres.Response.GetEcho(), cli.Error()
}

func (cli *socketClient) InfoSync(
    ctx context.Context,
    req types.RequestInfo,
) (*types.ResponseInfo, error) {
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
    if err != nil {
func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
    reqres := cli.queueRequest(types.ToRequestInfo(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetInfo(), nil

    return reqres.Response.GetInfo(), cli.Error()
}

func (cli *socketClient) DeliverTxSync(
    ctx context.Context,
    req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {

    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
    if err != nil {
func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
    reqres := cli.queueRequest(types.ToRequestSetOption(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetDeliverTx(), nil

    return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *socketClient) CheckTxSync(
    ctx context.Context,
    req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
    if err != nil {
func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
    reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetCheckTx(), nil

    return reqres.Response.GetDeliverTx(), cli.Error()
}

func (cli *socketClient) QuerySync(
    ctx context.Context,
    req types.RequestQuery,
) (*types.ResponseQuery, error) {
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
    if err != nil {
func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
    reqres := cli.queueRequest(types.ToRequestCheckTx(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetQuery(), nil

    return reqres.Response.GetCheckTx(), cli.Error()
}

func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
    if err != nil {
func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
    reqres := cli.queueRequest(types.ToRequestQuery(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetCommit(), nil

    return reqres.Response.GetQuery(), cli.Error()
}

func (cli *socketClient) InitChainSync(
    ctx context.Context,
    req types.RequestInitChain,
) (*types.ResponseInitChain, error) {

    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
    if err != nil {
func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
    reqres := cli.queueRequest(types.ToRequestCommit())
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetInitChain(), nil

    return reqres.Response.GetCommit(), cli.Error()
}

func (cli *socketClient) BeginBlockSync(
    ctx context.Context,
    req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {

    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
    if err != nil {
func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
    reqres := cli.queueRequest(types.ToRequestInitChain(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetBeginBlock(), nil

    return reqres.Response.GetInitChain(), cli.Error()
}

func (cli *socketClient) EndBlockSync(
    ctx context.Context,
    req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {

    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
    if err != nil {
func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
    reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetEndBlock(), nil

    return reqres.Response.GetBeginBlock(), cli.Error()
}

func (cli *socketClient) ListSnapshotsSync(
    ctx context.Context,
    req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {

    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
    if err != nil {
func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
    reqres := cli.queueRequest(types.ToRequestEndBlock(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetListSnapshots(), nil

    return reqres.Response.GetEndBlock(), cli.Error()
}

func (cli *socketClient) OfferSnapshotSync(
    ctx context.Context,
    req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {

    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
    if err != nil {
func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
    reqres := cli.queueRequest(types.ToRequestListSnapshots(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetOfferSnapshot(), nil

    return reqres.Response.GetListSnapshots(), cli.Error()
}

func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
    reqres := cli.queueRequest(types.ToRequestOfferSnapshot(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }

    return reqres.Response.GetOfferSnapshot(), cli.Error()
}

func (cli *socketClient) LoadSnapshotChunkSync(
    ctx context.Context,
    req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {

    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
    if err != nil {
    reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetLoadSnapshotChunk(), nil

    return reqres.Response.GetLoadSnapshotChunk(), cli.Error()
}

func (cli *socketClient) ApplySnapshotChunkSync(
    ctx context.Context,
    req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {

    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
    if err != nil {
    reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
    if err := cli.FlushSync(); err != nil {
        return nil, err
    }
    return reqres.Response.GetApplySnapshotChunk(), nil
    return reqres.Response.GetApplySnapshotChunk(), cli.Error()
}

//----------------------------------------
// queueRequest enqueues req onto the queue. If the queue is full, it either
// returns an error (sync=false) or blocks (sync=true).
//
// When sync=true, ctx can be used to break early. When sync=false, ctx will be
// used later to determine if the request should be dropped (if ctx.Err is
// non-nil).
//
// The caller is responsible for checking cli.Error.
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) {
func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
    reqres := NewReqRes(req)

    if sync {
        select {
        case cli.reqQueue <- &reqResWithContext{R: reqres, C: context.Background()}:
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    } else {
        select {
        case cli.reqQueue <- &reqResWithContext{R: reqres, C: ctx}:
        default:
            return nil, errors.New("buffer is full")
        }
    }
    // TODO: set cli.err if reqQueue times out
    cli.reqQueue <- reqres

    // Maybe auto-flush, or unset auto-flush
    switch req.Value.(type) {
@@ -502,41 +433,7 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s
        cli.flushTimer.Set()
    }

    return reqres, nil
}

func (cli *socketClient) queueRequestAsync(
    ctx context.Context,
    req *types.Request,
) (*ReqRes, error) {

    reqres, err := cli.queueRequest(ctx, req, false)
    if err != nil {
        return nil, queueErr(err)
    }

    return reqres, cli.Error()
}

func (cli *socketClient) queueRequestAndFlushSync(
    ctx context.Context,
    req *types.Request,
) (*ReqRes, error) {

    reqres, err := cli.queueRequest(ctx, req, true)
    if err != nil {
        return nil, queueErr(err)
    }

    if err := cli.FlushSync(ctx); err != nil {
        return nil, err
    }

    return reqres, cli.Error()
}

func queueErr(e error) error {
    return fmt.Errorf("can't queue req: %w", e)
    return reqres
}
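
On the context-based side, queueRequestAsync fails fast instead of blocking when the queue is full. A hedged sketch of how a caller might absorb that error; the retry loop and backoff here are illustrative, not part of the client.

package main

import (
    "context"
    "time"

    abcicli "github.com/tendermint/tendermint/abci/client"
)

// echoWithRetry retries when the client's request queue is full.
// Illustrative only: a real caller may prefer to surface the error.
func echoWithRetry(ctx context.Context, cli abcicli.Client, msg string) (*abcicli.ReqRes, error) {
    for {
        reqRes, err := cli.EchoAsync(ctx, msg)
        if err == nil {
            return reqRes, nil
        }
        select {
        case <-time.After(10 * time.Millisecond): // back off and retry
        case <-ctx.Done():
            return nil, ctx.Err()
        }
    }
}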

func (cli *socketClient) flushQueue() {
@@ -554,7 +451,7 @@ LOOP:
    for {
        select {
        case reqres := <-cli.reqQueue:
            reqres.R.Done()
            reqres.Done()
        default:
            break LOOP
        }
@@ -571,6 +468,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
        _, ok = res.Value.(*types.Response_Flush)
    case *types.Request_Info:
        _, ok = res.Value.(*types.Response_Info)
    case *types.Request_SetOption:
        _, ok = res.Value.(*types.Response_SetOption)
    case *types.Request_DeliverTx:
        _, ok = res.Value.(*types.Response_DeliverTx)
    case *types.Request_CheckTx:
@@ -603,10 +502,12 @@ func (cli *socketClient) stopForError(err error) {
    }

    cli.mtx.Lock()
    cli.err = err
    if cli.err == nil {
        cli.err = err
    }
    cli.mtx.Unlock()

    cli.Logger.Info("Stopping abci.socketClient", "reason", err)
    cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
    if err := cli.Stop(); err != nil {
        cli.Logger.Error("Error stopping abci.socketClient", "err", err)
    }

@@ -1,8 +1,8 @@
package abcicli_test

import (
    "context"
    "fmt"
    "sync"
    "testing"
    "time"

@@ -16,8 +16,6 @@ import (
    "github.com/tendermint/tendermint/libs/service"
)

var ctx = context.Background()

func TestProperSyncCalls(t *testing.T) {
    app := slowApp{}

@@ -36,12 +34,11 @@ func TestProperSyncCalls(t *testing.T) {
    resp := make(chan error, 1)
    go func() {
        // This is BeginBlockSync unrolled....
        reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
        assert.NoError(t, err)
        err = c.FlushSync(context.Background())
        assert.NoError(t, err)
        reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
        err := c.FlushSync()
        require.NoError(t, err)
        res := reqres.Response.GetBeginBlock()
        assert.NotNil(t, res)
        require.NotNil(t, res)
        resp <- c.Error()
    }()

@@ -72,16 +69,14 @@ func TestHangingSyncCalls(t *testing.T) {
    resp := make(chan error, 1)
    go func() {
        // Start BeginBlock and flush it
        reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
        assert.NoError(t, err)
        flush, err := c.FlushAsync(ctx)
        assert.NoError(t, err)
        reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
        flush := c.FlushAsync()
        // wait 20 ms for all events to travel socket, but
        // no response yet from server
        time.Sleep(20 * time.Millisecond)
        // kill the server, so the connections break
        err = s.Stop()
        assert.NoError(t, err)
        err := s.Stop()
        require.NoError(t, err)

        // wait for the response from BeginBlock
        reqres.Wait()
@@ -124,3 +119,71 @@ func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock
    time.Sleep(200 * time.Millisecond)
    return types.ResponseBeginBlock{}
}

// TestCallbackInvokedWhenSetLate ensures that the callback is invoked when
// set after the client completes the call into the app. Currently this
// test relies on the callback being allowed to be invoked twice if set multiple
// times, once when set early and once when set late.
func TestCallbackInvokedWhenSetLate(t *testing.T) {
    wg := &sync.WaitGroup{}
    wg.Add(1)
    app := blockedABCIApplication{
        wg: wg,
    }
    _, c := setupClientServer(t, app)
    reqRes := c.CheckTxAsync(types.RequestCheckTx{})

    done := make(chan struct{})
    cb := func(_ *types.Response) {
        close(done)
    }
    reqRes.SetCallback(cb)
    app.wg.Done()
    <-done

    var called bool
    cb = func(_ *types.Response) {
        called = true
    }
    reqRes.SetCallback(cb)
    require.True(t, called)
}

type blockedABCIApplication struct {
    wg *sync.WaitGroup
    types.BaseApplication
}

func (b blockedABCIApplication) CheckTx(r types.RequestCheckTx) types.ResponseCheckTx {
    b.wg.Wait()
    return b.BaseApplication.CheckTx(r)
}

// TestCallbackInvokedWhenSetEarly ensures that the callback is invoked when
// set before the client completes the call into the app.
func TestCallbackInvokedWhenSetEarly(t *testing.T) {
    wg := &sync.WaitGroup{}
    wg.Add(1)
    app := blockedABCIApplication{
        wg: wg,
    }
    _, c := setupClientServer(t, app)
    reqRes := c.CheckTxAsync(types.RequestCheckTx{})

    done := make(chan struct{})
    cb := func(_ *types.Response) {
        close(done)
    }
    reqRes.SetCallback(cb)
    app.wg.Done()

    called := func() bool {
        select {
        case <-done:
            return true
        default:
            return false
        }
    }
    require.Eventually(t, called, time.Second, time.Millisecond*25)
}

@@ -2,7 +2,6 @@ package main

import (
    "bufio"
    "context"
    "encoding/hex"
    "errors"
    "fmt"
@@ -30,8 +29,6 @@ import (
var (
    client abcicli.Client
    logger log.Logger

    ctx = context.Background()
)

// flags
@@ -151,6 +148,7 @@ func addCommands() {
    RootCmd.AddCommand(consoleCmd)
    RootCmd.AddCommand(echoCmd)
    RootCmd.AddCommand(infoCmd)
    RootCmd.AddCommand(setOptionCmd)
    RootCmd.AddCommand(deliverTxCmd)
    RootCmd.AddCommand(checkTxCmd)
    RootCmd.AddCommand(commitCmd)
@@ -178,6 +176,7 @@ you'd like to run:

where example.file looks something like:

    set_option serial on
    check_tx 0x00
    check_tx 0xff
    deliver_tx 0x00
@@ -199,7 +198,7 @@ This command opens an interactive console for running any of the other commands
without opening a new connection each time
`,
    Args: cobra.ExactArgs(0),
    ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
    ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
    RunE: cmdConsole,
}

@@ -217,6 +216,13 @@ var infoCmd = &cobra.Command{
    Args: cobra.ExactArgs(0),
    RunE: cmdInfo,
}
var setOptionCmd = &cobra.Command{
    Use: "set_option",
    Short: "set an option on the application",
    Long: "set an option on the application",
    Args: cobra.ExactArgs(2),
    RunE: cmdSetOption,
}

var deliverTxCmd = &cobra.Command{
    Use: "deliver_tx",
@@ -318,6 +324,7 @@ func cmdTest(cmd *cobra.Command, args []string) error {
    return compose(
        []func() error{
            func() error { return servertest.InitChain(client) },
            func() error { return servertest.SetOption(client, "serial", "on") },
            func() error { return servertest.Commit(client, nil) },
            func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
            func() error { return servertest.Commit(client, nil) },
@@ -432,6 +439,8 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
        return cmdInfo(cmd, actualArgs)
    case "query":
        return cmdQuery(cmd, actualArgs)
    case "set_option":
        return cmdSetOption(cmd, actualArgs)
    default:
        return cmdUnimplemented(cmd, pArgs)
    }
@@ -455,6 +464,7 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
    fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
    fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
    fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
    fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
    fmt.Println("Use \"[command] --help\" for more information about a command.")

    return nil
@@ -466,7 +476,7 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
    if len(args) > 0 {
        msg = args[0]
    }
    res, err := client.EchoSync(ctx, msg)
    res, err := client.EchoSync(msg)
    if err != nil {
        return err
    }
@@ -482,7 +492,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
    if len(args) == 1 {
        version = args[0]
    }
    res, err := client.InfoSync(ctx, types.RequestInfo{Version: version})
    res, err := client.InfoSync(types.RequestInfo{Version: version})
    if err != nil {
        return err
    }
@@ -494,6 +504,25 @@ func cmdInfo(cmd *cobra.Command, args []string) error {

const codeBad uint32 = 10

// Set an option on the application
func cmdSetOption(cmd *cobra.Command, args []string) error {
    if len(args) < 2 {
        printResponse(cmd, args, response{
            Code: codeBad,
            Log: "want at least arguments of the form: <key> <value>",
        })
        return nil
    }

    key, val := args[0], args[1]
    _, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
    if err != nil {
        return err
    }
    printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
    return nil
}

// Append a new tx to application
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
    if len(args) == 0 {
@@ -507,7 +536,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
    if err != nil {
        return err
    }
    res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
    res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
    if err != nil {
        return err
    }
@@ -533,7 +562,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
    if err != nil {
        return err
    }
    res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
    res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
    if err != nil {
        return err
    }
@@ -548,7 +577,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {

// Get application Merkle root hash
func cmdCommit(cmd *cobra.Command, args []string) error {
    res, err := client.CommitSync(ctx)
    res, err := client.CommitSync()
    if err != nil {
        return err
    }
@@ -573,7 +602,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
        return err
    }

    resQuery, err := client.QuerySync(ctx, types.RequestQuery{
    resQuery, err := client.QuerySync(types.RequestQuery{
        Data: queryBytes,
        Path: flagPath,
        Height: int64(flagHeight),

@@ -24,6 +24,24 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
    return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}

func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption {
    key, value := req.Key, req.Value
    if key == "serial" && value == "on" {
        app.serial = true
    } else {
        /*
            TODO Panic and have the ABCI server pass an exception.
            The client can call SetOptionSync() and get an `error`.
            return types.ResponseSetOption{
                Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
            }
        */
        return types.ResponseSetOption{}
    }

    return types.ResponseSetOption{}
}

func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
    if app.serial {
        if len(req.Tx) > 8 {
@@ -51,7 +69,6 @@ func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx
            Code: code.CodeTypeEncodingError,
            Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
    }

    tx8 := make([]byte, 8)
    copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
    txValue := binary.BigEndian.Uint64(tx8)

@@ -1,7 +1,6 @@
package example

import (
    "context"
    "fmt"
    "math/rand"
    "net"
@@ -14,6 +13,8 @@ import (

    "google.golang.org/grpc"

    "golang.org/x/net/context"

    "github.com/tendermint/tendermint/libs/log"
    tmnet "github.com/tendermint/tendermint/libs/net"

@@ -44,7 +45,7 @@ func TestGRPC(t *testing.T) {
}

func testStream(t *testing.T, app types.Application) {
    const numDeliverTxs = 20000
    numDeliverTxs := 20000
    socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
    defer os.Remove(socketFile)
    socket := fmt.Sprintf("unix://%v", socketFile)
@@ -52,8 +53,9 @@ func testStream(t *testing.T, app types.Application) {
    // Start the listener
    server := abciserver.NewSocketServer(socket, app)
    server.SetLogger(log.TestingLogger().With("module", "abci-server"))
    err := server.Start()
    require.NoError(t, err)
    if err := server.Start(); err != nil {
        require.NoError(t, err, "Error starting socket server")
    }
    t.Cleanup(func() {
        if err := server.Stop(); err != nil {
            t.Error(err)
@@ -63,8 +65,9 @@ func testStream(t *testing.T, app types.Application) {
    // Connect to the socket
    client := abcicli.NewSocketClient(socket, false)
    client.SetLogger(log.TestingLogger().With("module", "abci-client"))
    err = client.Start()
    require.NoError(t, err)
    if err := client.Start(); err != nil {
        t.Fatalf("Error starting socket client: %v", err.Error())
    }
    t.Cleanup(func() {
        if err := client.Stop(); err != nil {
            t.Error(err)
@@ -98,24 +101,22 @@ func testStream(t *testing.T, app types.Application) {
        }
    })

    ctx := context.Background()

    // Write requests
    for counter := 0; counter < numDeliverTxs; counter++ {
        // Send request
        _, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
        require.NoError(t, err)
        reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")})
        _ = reqRes
        // check err ?

        // Sometimes send flush messages
        if counter%128 == 0 {
            err = client.FlushSync(context.Background())
            require.NoError(t, err)
        if counter%123 == 0 {
            client.FlushAsync()
            // check err ?
        }
    }

    // Send final flush message
    _, err = client.FlushAsync(ctx)
    require.NoError(t, err)
    client.FlushAsync()

    <-done
}
@@ -129,7 +130,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {

func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
    numDeliverTxs := 2000
    socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
    socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
    defer os.Remove(socketFile)
    socket := fmt.Sprintf("unix://%v", socketFile)

@@ -147,6 +148,7 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
    })

    // Connect to the socket
    //nolint:staticcheck // SA1019 Existing use of deprecated but supported dial option.
    conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
    if err != nil {
        t.Fatalf("Error dialing GRPC server: %v", err.Error())

@@ -1,9 +1,8 @@
package kvstore

import (
    "context"
    "fmt"
    "io/ioutil"
    "os"
    "sort"
    "testing"

@@ -24,8 +23,6 @@ const (
    testValue = "def"
)

var ctx = context.Background()

func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
    req := types.RequestDeliverTx{Tx: tx}
    ar := app.DeliverTx(req)
@@ -74,7 +71,7 @@ func TestKVStoreKV(t *testing.T) {
}

func TestPersistentKVStoreKV(t *testing.T) {
    dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
    dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
    if err != nil {
        t.Fatal(err)
    }
@@ -90,7 +87,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
}

func TestPersistentKVStoreInfo(t *testing.T) {
    dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
    dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
    if err != nil {
        t.Fatal(err)
    }
@@ -117,12 +114,11 @@ func TestPersistentKVStoreInfo(t *testing.T) {
    if resInfo.LastBlockHeight != height {
        t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
    }

}

// add a validator, remove a validator, update a validator
func TestValUpdates(t *testing.T) {
    dir, err := ioutil.TempDir("/tmp", "abci-kvstore-test") // TODO
    dir, err := os.MkdirTemp("/tmp", "abci-kvstore-test") // TODO
    if err != nil {
        t.Fatal(err)
    }
@@ -165,7 +161,7 @@ func TestValUpdates(t *testing.T) {

    makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)

    vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
    vals1 = append(vals[:nInit-2], vals[nInit+1]) //nolint: gocritic
    vals2 = kvstore.Validators()
    valsEqual(t, vals1, vals2)

@@ -184,7 +180,6 @@ func TestValUpdates(t *testing.T) {
    vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
    vals2 = kvstore.Validators()
    valsEqual(t, vals1, vals2)

}

func makeApplyBlock(
@@ -192,7 +187,8 @@ func makeApplyBlock(
    kvstore types.Application,
    heightInt int,
    diff []types.ValidatorUpdate,
    txs ...[]byte) {
    txs ...[]byte,
) {
    // make and apply block
    height := int64(heightInt)
    hash := []byte("foo")
@@ -210,7 +206,6 @@ func makeApplyBlock(
    kvstore.Commit()

    valsEqual(t, diff, resEndBlock.ValidatorUpdates)

}

// order doesn't matter
@@ -296,7 +291,7 @@ func TestClientServer(t *testing.T) {

    // set up grpc app
    kvstore = NewApplication()
    gclient, gserver, err := makeGRPCClientServer(kvstore, "kvstore-grpc")
    gclient, gserver, err := makeGRPCClientServer(kvstore, "/tmp/kvstore-grpc")
    require.NoError(t, err)

    t.Cleanup(func() {
@@ -326,23 +321,23 @@ func runClientTests(t *testing.T, client abcicli.Client) {
}

func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
    ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
    ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
    require.NoError(t, err)
    require.False(t, ar.IsErr(), ar)
    // repeating tx doesn't raise error
    ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
    ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
    require.NoError(t, err)
    require.False(t, ar.IsErr(), ar)
    // commit
    _, err = app.CommitSync(ctx)
    _, err = app.CommitSync()
    require.NoError(t, err)

    info, err := app.InfoSync(ctx, types.RequestInfo{})
    info, err := app.InfoSync(types.RequestInfo{})
    require.NoError(t, err)
    require.NotZero(t, info.LastBlockHeight)

    // make sure query is fine
    resQuery, err := app.QuerySync(ctx, types.RequestQuery{
    resQuery, err := app.QuerySync(types.RequestQuery{
        Path: "/store",
        Data: []byte(key),
    })
@@ -353,7 +348,7 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string)
    require.EqualValues(t, info.LastBlockHeight, resQuery.Height)

    // make sure proof is fine
    resQuery, err = app.QuerySync(ctx, types.RequestQuery{
    resQuery, err = app.QuerySync(types.RequestQuery{
        Path: "/store",
        Data: []byte(key),
        Prove: true,

@@ -62,6 +62,10 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
    return res
}

func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
    return app.app.SetOption(req)
}

// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
    // if it starts with "val:", update the validator set

@@ -2,9 +2,8 @@
Package server is used to start a new ABCI server.

It contains two server implementations:
* gRPC server
* socket server

  - gRPC server
  - socket server
*/
package server

@@ -200,6 +200,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
    case *types.Request_Info:
        res := s.app.Info(*r.Info)
        responses <- types.ToResponseInfo(res)
    case *types.Request_SetOption:
        res := s.app.SetOption(*r.SetOption)
        responses <- types.ToResponseSetOption(res)
    case *types.Request_DeliverTx:
        res := s.app.DeliverTx(*r.DeliverTx)
        responses <- types.ToResponseDeliverTx(res)

@@ -2,7 +2,6 @@ package testsuite

import (
    "bytes"
    "context"
    "errors"
    "fmt"

@@ -11,8 +10,6 @@ import (
    tmrand "github.com/tendermint/tendermint/libs/rand"
)

var ctx = context.Background()

func InitChain(client abcicli.Client) error {
    total := 10
    vals := make([]types.ValidatorUpdate, total)
@@ -21,7 +18,7 @@ func InitChain(client abcicli.Client) error {
        power := tmrand.Int()
        vals[i] = types.UpdateValidator(pubkey, int64(power), "")
    }
    _, err := client.InitChainSync(ctx, types.RequestInitChain{
    _, err := client.InitChainSync(types.RequestInitChain{
        Validators: vals,
    })
    if err != nil {
@@ -32,8 +29,19 @@ func InitChain(client abcicli.Client) error {
    return nil
}

func SetOption(client abcicli.Client, key, value string) error {
    _, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
    if err != nil {
        fmt.Println("Failed test: SetOption")
        fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err)
        return err
    }
    fmt.Println("Passed test: SetOption")
    return nil
}

func Commit(client abcicli.Client, hashExp []byte) error {
    res, err := client.CommitSync(ctx)
    res, err := client.CommitSync()
    data := res.Data
    if err != nil {
        fmt.Println("Failed test: Commit")
@@ -50,7 +58,7 @@ func Commit(client abcicli.Client, hashExp []byte) error {
}

func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
    res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
    res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
    code, data, log := res.Code, res.Data, res.Log
    if code != codeExp {
        fmt.Println("Failed test: DeliverTx")
@@ -69,7 +77,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
}

func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
    res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
    res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
    code, data, log := res.Code, res.Data, res.Log
    if code != codeExp {
        fmt.Println("Failed test: CheckTx")

@@ -2,7 +2,6 @@ package main

import (
    "bytes"
    "context"
    "fmt"
    "os"

@@ -11,8 +10,6 @@ import (
    "github.com/tendermint/tendermint/libs/log"
)

var ctx = context.Background()

func startClient(abciType string) abcicli.Client {
    // Start client
    client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
@@ -28,8 +25,15 @@ func startClient(abciType string) abcicli.Client {
    return client
}

func setOption(client abcicli.Client, key, value string) {
    _, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
    if err != nil {
        panicf("setting %v=%v: \nerr: %v", key, value, err)
    }
}

func commit(client abcicli.Client, hashExp []byte) {
    res, err := client.CommitSync(ctx)
    res, err := client.CommitSync()
    if err != nil {
        panicf("client error: %v", err)
    }
@@ -39,7 +43,7 @@ func commit(client abcicli.Client, hashExp []byte) {
}

func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
    res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
    res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
    if err != nil {
        panicf("client error: %v", err)
    }
@@ -51,6 +55,24 @@ func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
    }
}

/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
    res, err := client.CheckTxSync(txBytes)
    if err != nil {
        panicf("client error: %v", err)
    }
    if res.IsErr() {
        panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log)
    }
    if res.Code != codeExp {
        panicf("CheckTx response code was unexpected. Got %v expected %v. Log: %v",
            res.Code, codeExp, res.Log)
    }
    if !bytes.Equal(res.Data, dataExp) {
        panicf("CheckTx response data was unexpected. Got %X expected %X",
            res.Data, dataExp)
    }
}*/

func panicf(format string, a ...interface{}) {
    panic(fmt.Sprintf(format, a...))
}

@@ -7,6 +7,7 @@ import (
    "os/exec"
    "time"

    "github.com/tendermint/tendermint/abci/example/code"
    "github.com/tendermint/tendermint/abci/types"
)

@@ -40,7 +41,7 @@ func ensureABCIIsUp(typ string, n int) error {
        if err == nil {
            break
        }
        time.Sleep(500 * time.Millisecond)
        <-time.After(500 * time.Millisecond)
    }
    return err
}
@@ -78,16 +79,17 @@ func testCounter() {
        }
    }()

    // commit(client, nil)
    // deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
    setOption(client, "serial", "on")
    commit(client, nil)
    deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
    commit(client, nil)
    deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
    commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
    // deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
    deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
    deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
    deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
    deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
    deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
    // deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
    deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
    commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}

@@ -1,3 +1,4 @@
set_option serial on
check_tx 0x00
check_tx 0xff
deliver_tx 0x00

@@ -1,3 +1,7 @@
> set_option serial on
-> code: OK
-> log: OK (SetOption doesn't return anything.)

> check_tx 0x00
-> code: OK

@@ -8,16 +12,18 @@
-> code: OK

> check_tx 0x00
-> code: OK
-> code: 2
-> log: Invalid nonce. Expected >= 1, got 0

> deliver_tx 0x01
-> code: OK

> deliver_tx 0x04
-> code: OK
-> code: 2
-> log: Invalid nonce. Expected 2, got 4

> info
-> code: OK
-> data: {"hashes":0,"txs":3}
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
-> data: {"hashes":0,"txs":2}
-> data.hex: 0x7B22686173686573223A302C22747873223A327D

@@ -1,7 +1,7 @@
package types

import (
    "context"
    context "golang.org/x/net/context"
)

// Application is an interface that enables any finite, deterministic state machine
@@ -10,8 +10,9 @@ import (
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
type Application interface {
    // Info/Query Connection
    Info(RequestInfo) ResponseInfo // Return application info
    Query(RequestQuery) ResponseQuery // Query for state
    Info(RequestInfo) ResponseInfo // Return application info
    SetOption(RequestSetOption) ResponseSetOption // Set application option
    Query(RequestQuery) ResponseQuery // Query for state

    // Mempool Connection
    CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
@@ -46,6 +47,10 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
    return ResponseInfo{}
}

func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
    return ResponseSetOption{}
}

func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
    return ResponseDeliverTx{Code: CodeTypeOK}
}
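
Because BaseApplication supplies no-op defaults for every ABCI method, a concrete application embeds it and overrides only what it needs. A minimal sketch (CounterApp is a made-up name, not part of the diff):

package main

import "github.com/tendermint/tendermint/abci/types"

// CounterApp is a hypothetical minimal ABCI app: it embeds
// BaseApplication for default behaviour and overrides only DeliverTx.
type CounterApp struct {
    types.BaseApplication
    txCount int64
}

func (app *CounterApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
    app.txCount++ // count every delivered tx
    return types.ResponseDeliverTx{Code: types.CodeTypeOK}
}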

@@ -114,6 +119,11 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
    return &res, nil
}

func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) {
    res := app.app.SetOption(*req)
    return &res, nil
}

func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
    res := app.app.DeliverTx(*req)
    return &res, nil

@@ -87,6 +87,12 @@ func ToRequestInfo(req RequestInfo) *Request {
    }
}

func ToRequestSetOption(req RequestSetOption) *Request {
    return &Request{
        Value: &Request_SetOption{&req},
    }
}

func ToRequestDeliverTx(req RequestDeliverTx) *Request {
    return &Request{
        Value: &Request_DeliverTx{&req},
@@ -178,6 +184,13 @@ func ToResponseInfo(res ResponseInfo) *Response {
        Value: &Response_Info{&res},
    }
}

func ToResponseSetOption(res ResponseSetOption) *Response {
    return &Response{
        Value: &Response_SetOption{&res},
    }
}

func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
    return &Response{
        Value: &Response_DeliverTx{&res},

@@ -42,7 +42,7 @@ func (r ResponseQuery) IsErr() bool {
}

//---------------------------------------------------------------------------
// override JSON marshalling so we emit defaults (ie. disable omitempty)
// override JSON marshaling so we emit defaults (ie. disable omitempty)

var (
    jsonpbMarshaller = jsonpb.Marshaler{
@@ -52,6 +52,16 @@ var (
    jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)

func (r *ResponseSetOption) MarshalJSON() ([]byte, error) {
    s, err := jsonpbMarshaller.MarshalToString(r)
    return []byte(s), err
}

func (r *ResponseSetOption) UnmarshalJSON(b []byte) error {
    reader := bytes.NewBuffer(b)
    return jsonpbUnmarshaller.Unmarshal(reader, r)
}
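
The effect of routing through jsonpb here is that default values are emitted rather than omitted, so a zero-valued response still round-trips with all of its fields present. A rough sketch of that behaviour (the exact field set in the printed output is an assumption):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/abci/types"
)

func main() {
    // A zero-valued response still marshals its fields explicitly,
    // because the custom MarshalJSON disables omitempty.
    bz, err := (&types.ResponseSetOption{}).MarshalJSON()
    if err != nil {
        panic(err)
    }
    fmt.Println(string(bz)) // e.g. {"code":0,"log":"","info":""}
}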

func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
    s, err := jsonpbMarshaller.MarshalToString(r)
    return []byte(s), err
@@ -116,5 +126,6 @@ var _ jsonRoundTripper = (*ResponseCommit)(nil)
var _ jsonRoundTripper = (*ResponseQuery)(nil)
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
var _ jsonRoundTripper = (*ResponseSetOption)(nil)

var _ jsonRoundTripper = (*EventAttribute)(nil)

File diff suppressed because it is too large

appveyor.yml (new file, 12 lines)
@@ -0,0 +1,12 @@
version: 1.0.{build}
configuration: Release
platform:
- x64
- x86
clone_folder: c:\go\path\src\github.com\tendermint\tendermint
before_build:
- cmd: set GOPATH=%GOROOT%\path
- cmd: set PATH=%GOPATH%\bin;%PATH%
build_script:
- cmd: make test
test: off

@@ -8,35 +8,34 @@ There are four different behaviours a reactor can report.

1. bad message

type badMessage struct {
    explanation string
}
    type badMessage struct {
        explanation string
    }

This message will request the peer be stopped for an error
# This message will request the peer be stopped for an error

2. message out of order

type messageOutOfOrder struct {
    explanation string
}
    type messageOutOfOrder struct {
        explanation string
    }

This message will request the peer be stopped for an error
# This message will request the peer be stopped for an error

3. consensus vote

type consensusVote struct {
    explanation string
}
    type consensusVote struct {
        explanation string
    }

This message will request the peer be marked as good
# This message will request the peer be marked as good

4. block part

type blockPart struct {
    explanation string
}
    type blockPart struct {
        explanation string
    }

This message will request the peer be marked as good

*/
package behaviour
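
For orientation, a sketch of how a reactor might report one of these behaviours. It assumes the package exposes a Reporter interface and a BadMessage constructor matching the shapes suggested above, so treat the exact signatures as assumptions:

package main

import (
    "github.com/tendermint/tendermint/behaviour"
    "github.com/tendermint/tendermint/p2p"
)

// onBadMessage is a hypothetical hook: a reactor that fails to decode a
// peer's message reports the peer, which can lead to it being stopped.
func onBadMessage(r behaviour.Reporter, peerID p2p.ID) error {
    return r.Report(behaviour.BadMessage(peerID, "unmarshal failed"))
}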

@@ -1,17 +0,0 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.

- v0 was the very first implementation. it's battle tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.

Check out ADR-40 for the formal model and requirements.

# Termination criteria

1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
there are no pending requests.

*/
package blockchain
@@ -6,6 +6,7 @@ import (

    "github.com/gogo/protobuf/proto"

    "github.com/tendermint/tendermint/p2p"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    "github.com/tendermint/tendermint/types"
)
@@ -19,58 +20,6 @@ const (
    BlockResponseMessageFieldKeySize
)

// EncodeMsg encodes a Protobuf message
func EncodeMsg(pb proto.Message) ([]byte, error) {
    msg := bcproto.Message{}

    switch pb := pb.(type) {
    case *bcproto.BlockRequest:
        msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
    case *bcproto.BlockResponse:
        msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
    case *bcproto.NoBlockResponse:
        msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
    case *bcproto.StatusRequest:
        msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
    case *bcproto.StatusResponse:
        msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
    default:
        return nil, fmt.Errorf("unknown message type %T", pb)
    }

    bz, err := proto.Marshal(&msg)
    if err != nil {
        return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
    }

    return bz, nil
}

// DecodeMsg decodes a Protobuf message.
func DecodeMsg(bz []byte) (proto.Message, error) {
    pb := &bcproto.Message{}

    err := proto.Unmarshal(bz, pb)
    if err != nil {
        return nil, err
    }

    switch msg := pb.Sum.(type) {
    case *bcproto.Message_BlockRequest:
        return msg.BlockRequest, nil
    case *bcproto.Message_BlockResponse:
        return msg.BlockResponse, nil
    case *bcproto.Message_NoBlockResponse:
        return msg.NoBlockResponse, nil
    case *bcproto.Message_StatusRequest:
        return msg.StatusRequest, nil
    case *bcproto.Message_StatusResponse:
        return msg.StatusResponse, nil
    default:
        return nil, fmt.Errorf("unknown message type %T", msg)
    }
}

// ValidateMsg validates a message.
func ValidateMsg(pb proto.Message) error {
    if pb == nil {
@@ -83,8 +32,10 @@ func ValidateMsg(pb proto.Message) error {
        return errors.New("negative Height")
    }
    case *bcproto.BlockResponse:
        // validate basic is called later when converting from proto
        return nil
        _, err := types.BlockFromProto(msg.Block)
        if err != nil {
            return err
        }
    case *bcproto.NoBlockResponse:
        if msg.Height < 0 {
            return errors.New("negative Height")
@@ -106,3 +57,31 @@ func ValidateMsg(pb proto.Message) error {
    }
    return nil
}

// EncodeMsg encodes a Protobuf message
//
// Deprecated: Will be removed in v0.37.
func EncodeMsg(pb proto.Message) ([]byte, error) {
    if um, ok := pb.(p2p.Wrapper); ok {
        pb = um.Wrap()
    }
    bz, err := proto.Marshal(pb)
    if err != nil {
        return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
    }

    return bz, nil
}

// DecodeMsg decodes a Protobuf message.
//
// Deprecated: Will be removed in v0.37.
func DecodeMsg(bz []byte) (proto.Message, error) {
    pb := &bcproto.Message{}

    err := proto.Unmarshal(bz, pb)
    if err != nil {
        return nil, err
    }
    return pb.Unwrap()
}
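
A round-trip sketch for the (deprecated) helpers above, assuming the wrapped proto types implement the p2p.Wrapper interface that EncodeMsg checks for:

package main

import (
    "fmt"

    bc "github.com/tendermint/tendermint/blockchain"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
)

func main() {
    // Encode a BlockRequest into the bcproto.Message envelope, then
    // decode it back out again.
    bz, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: 10})
    if err != nil {
        panic(err)
    }
    msg, err := bc.DecodeMsg(bz)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%T height=%d\n", msg, msg.(*bcproto.BlockRequest).Height)
}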

@@ -78,7 +78,7 @@ func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
    }
}

// nolint:lll // ignore line length in tests
//nolint:lll // ignore line length in tests
func TestBlockchainMessageVectors(t *testing.T) {
    block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil)
    block.Version.Block = 11 // overwrite updated protocol version

@@ -32,6 +32,7 @@ const (
    maxTotalRequesters = 600
    maxPendingRequests = maxTotalRequesters
    maxPendingRequestsPerPeer = 20
    requestRetrySeconds = 30

    // Minimum recv rate to ensure we're receiving blocks from a peer fast
    // enough. If a peer is not sending us data at at least that rate, we
@@ -58,19 +59,12 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
    are not at peer limits, we can probably switch to consensus reactor
*/

// BlockRequest stores a block request identified by the block Height and the
// PeerID responsible for delivering the block.
type BlockRequest struct {
    Height int64
    PeerID p2p.ID
}

// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
    service.BaseService
    lastAdvance time.Time
    startTime time.Time

    mtx tmsync.RWMutex
    mtx tmsync.Mutex
    // block requests
    requesters map[int64]*bpRequester
    height int64 // the lowest key in requesters.
@@ -105,8 +99,8 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// OnStart implements service.Service by spawning requesters routine and recording
// pool's start time.
func (pool *BlockPool) OnStart() error {
    pool.lastAdvance = time.Now()
    go pool.makeRequestersRoutine()
    pool.startTime = time.Now()
    return nil
}

@@ -141,7 +135,6 @@ func (pool *BlockPool) removeTimedoutPeers() {
    defer pool.mtx.Unlock()

    for _, peer := range pool.peers {
        // check if peer timed out
        if !peer.didTimeout && peer.numPending > 0 {
            curRate := peer.recvMonitor.Status().CurRate
            // curRate can be 0 on start
@@ -155,7 +148,6 @@ func (pool *BlockPool) removeTimedoutPeers() {
                peer.didTimeout = true
            }
        }

        if peer.didTimeout {
            pool.removePeer(peer.id)
        }
@@ -165,24 +157,33 @@ func (pool *BlockPool) removeTimedoutPeers() {

// GetStatus returns pool's height, numPending requests and the number of
// requesters.
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
    pool.mtx.RLock()
    defer pool.mtx.RUnlock()
    pool.mtx.Lock()
    defer pool.mtx.Unlock()

    return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
}

// IsCaughtUp returns true if this node is caught up, and false otherwise.
// TODO: relax conditions, prevent abuse.
func (pool *BlockPool) IsCaughtUp() bool {
    pool.mtx.RLock()
    defer pool.mtx.RUnlock()
    pool.mtx.Lock()
    defer pool.mtx.Unlock()

    // Need at least 1 peer to be considered caught up.
    if len(pool.peers) == 0 {
        pool.Logger.Debug("Blockpool has no peers")
        return false
    }
    // NOTE: we use maxPeerHeight - 1 because to sync block H requires block H+1

    // Some conditions to determine if we're caught up.
    // Ensures we've either received a block or waited some amount of time,
    // and that we're synced to the highest known height.
    // Note we use maxPeerHeight - 1 because to sync block H requires block H+1
    // to verify the LastCommit.
    return pool.height >= (pool.maxPeerHeight - 1)
    receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second
    ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1)
    isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
    return isCaughtUp
}
|
||||
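
// Editor's note: the hunk above replaces a single height comparison with two
// named conditions. A minimal, self-contained sketch of the new check, with a
// hypothetical helper name and parameters (not part of the diff):
func isCaughtUpSketch(height, maxPeerHeight int64, startTime time.Time, numPeers int) bool {
    if numPeers == 0 {
        return false // need at least one peer to compare against
    }
    // Either a block has been processed or a grace period has elapsed...
    receivedBlockOrTimedOut := height > 0 || time.Since(startTime) > 5*time.Second
    // ...and we are within one block of the tallest peer: syncing block H
    // requires block H+1 to verify H's LastCommit.
    ourChainIsLongest := maxPeerHeight == 0 || height >= maxPeerHeight-1
    return receivedBlockOrTimedOut && ourChainIsLongest
}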

// PeekTwoBlocks returns blocks at pool.height and pool.height+1.
@@ -190,8 +191,8 @@ func (pool *BlockPool) IsCaughtUp() bool {
// So we peek two blocks at a time.
// The caller will verify the commit.
func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) {
    pool.mtx.RLock()
    defer pool.mtx.RUnlock()
    pool.mtx.Lock()
    defer pool.mtx.Unlock()

    if r := pool.requesters[pool.height]; r != nil {
        first = r.getBlock()
@@ -209,12 +210,16 @@ func (pool *BlockPool) PopRequest() {
    defer pool.mtx.Unlock()

    if r := pool.requesters[pool.height]; r != nil {
        /* The block can disappear at any time, due to removePeer().
        if r := pool.requesters[pool.height]; r == nil || r.block == nil {
            PanicSanity("PopRequest() requires a valid block")
        }
        */
        if err := r.Stop(); err != nil {
            pool.Logger.Error("Error stopping requester", "err", err)
        }
        delete(pool.requesters, pool.height)
        pool.height++
        pool.lastAdvance = time.Now()
    } else {
        panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
    }
@@ -244,8 +249,14 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int

    requester := pool.requesters[block.Height]
    if requester == nil {
        pool.Logger.Error("peer sent us a block we didn't expect",
            "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
        pool.Logger.Info(
            "peer sent us a block we didn't expect",
            "peer",
            peerID,
            "curHeight",
            pool.height,
            "blockHeight",
            block.Height)
        diff := pool.height - block.Height
        if diff < 0 {
            diff *= -1
@@ -263,27 +274,18 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
            peer.decrPending(blockSize)
        }
    } else {
        err := errors.New("requester is different or block already exists")
        pool.Logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height)
        pool.sendError(err, peerID)
        pool.Logger.Info("invalid peer", "peer", peerID, "blockHeight", block.Height)
        pool.sendError(errors.New("invalid peer"), peerID)
    }
}

// MaxPeerHeight returns the highest reported height.
func (pool *BlockPool) MaxPeerHeight() int64 {
    pool.mtx.RLock()
    defer pool.mtx.RUnlock()
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
    return pool.maxPeerHeight
}

// LastAdvance returns the time when the last block was processed (or start
// time if no blocks were processed).
func (pool *BlockPool) LastAdvance() time.Time {
    pool.mtx.RLock()
    defer pool.mtx.RUnlock()
    return pool.lastAdvance
}

// SetPeerRange sets the peer's alleged blockchain base and height.
func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
    pool.mtx.Lock()
@@ -409,6 +411,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
}

// for debugging purposes
//
//nolint:unused
func (pool *BlockPool) debug() string {
    pool.mtx.Lock()
@@ -600,6 +603,7 @@ OUTER_LOOP:
        }
        peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
        if peer == nil {
            bpr.Logger.Debug("No peers currently available; will retry shortly", "height", bpr.height)
            time.Sleep(requestIntervalMS * time.Millisecond)
            continue PICK_PEER_LOOP
        }
@@ -609,6 +613,7 @@ OUTER_LOOP:
        bpr.peerID = peer.id
        bpr.mtx.Unlock()

        to := time.NewTimer(requestRetrySeconds * time.Second)
        // Send request and wait.
        bpr.pool.sendRequest(bpr.height, peer.id)
    WAIT_LOOP:
@@ -621,6 +626,11 @@ OUTER_LOOP:
            return
        case <-bpr.Quit():
            return
        case <-to.C:
            bpr.Logger.Debug("Retrying block request after timeout", "height", bpr.height, "peer", bpr.peerID)
            // Simulate a redo
            bpr.reset()
            continue OUTER_LOOP
        case peerID := <-bpr.redoCh:
            if peerID == bpr.peerID {
                bpr.reset()
@@ -636,3 +646,10 @@ OUTER_LOOP:
            }
        }
    }
}

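// Editor's note: a hedged sketch of the retry pattern the requester routine
// above gains with requestRetrySeconds; the identifiers here are illustrative
// only. A timer is armed per request, and when it fires the requester resets
// so the outer loop can pick a fresh peer instead of waiting forever on a
// silent one.
func retryLoopSketch(send func() bool, retryAfter time.Duration, quit <-chan struct{}) {
    for {
        if !send() { // e.g. no peer available
            return
        }
        to := time.NewTimer(retryAfter)
        select {
        case <-quit:
            to.Stop()
            return
        case <-to.C:
            // no response in time: loop around and re-send the request
        }
    }
}
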
// BlockRequest stores a block request identified by the block Height and the PeerID responsible for
// delivering the block
type BlockRequest struct {
    Height int64
    PeerID p2p.ID
}

@@ -5,6 +5,8 @@ import (
    "reflect"
    "time"

    "github.com/gogo/protobuf/proto"

    bc "github.com/tendermint/tendermint/blockchain"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/p2p"
@@ -28,9 +30,6 @@ const (
    statusUpdateIntervalSeconds = 10
    // check if we should switch to consensus reactor
    switchToConsensusIntervalSeconds = 1

    // switch to consensus after this duration of inactivity
    syncTimeout = 60 * time.Second
)

type consensusReactor interface {
@@ -143,25 +142,24 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
    return []*p2p.ChannelDescriptor{
        {
            ID:                  BlockchainChannel,
            Priority:            10,
            Priority:            5,
            SendQueueCapacity:   1000,
            RecvBufferCapacity:  50 * 4096,
            RecvMessageCapacity: bc.MaxMsgSize,
            MessageType:         &bcproto.Message{},
        },
    }
}

// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
    msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
        Base:   bcR.store.Base(),
        Height: bcR.store.Height()})
    if err != nil {
        bcR.Logger.Error("could not convert msg to protobuf", "err", err)
        return
    }

    _ = peer.Send(BlockchainChannel, msgBytes)
    p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
        ChannelID: BlockchainChannel,
        Message: &bcproto.StatusResponse{
            Base:   bcR.store.Base(),
            Height: bcR.store.Height(),
        },
    }, bcR.Logger)
    // it's OK if send fails. will try later in poolRoutine

    // peer is added to the pool once we receive the first
@@ -185,126 +183,115 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest,
        bcR.Logger.Error("could not convert msg to protobuf", "err", err)
        return false
    }

    msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl})
    if err != nil {
        bcR.Logger.Error("could not marshal msg", "err", err)
        return false
    }

    return src.TrySend(BlockchainChannel, msgBytes)
    return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
        ChannelID: BlockchainChannel,
        Message:   &bcproto.BlockResponse{Block: bl},
    }, bcR.Logger)
    }

    bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)

    msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
    if err != nil {
        bcR.Logger.Error("could not convert msg to protobuf", "err", err)
        return false
    }

    return src.TrySend(BlockchainChannel, msgBytes)
    return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
        ChannelID: BlockchainChannel,
        Message:   &bcproto.NoBlockResponse{Height: msg.Height},
    }, bcR.Logger)
}

// Receive implements Reactor by handling 4 types of messages (look below).
// XXX: do not call any methods that can block or incur heavy processing.
// https://github.com/tendermint/tendermint/issues/2888
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
    logger := bcR.Logger.With("src", src, "chId", chID)

    msg, err := bc.DecodeMsg(msgBytes)
    if err != nil {
        logger.Error("Error decoding message", "err", err)
        bcR.Switch.StopPeerForError(src, err)
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
    if err := bc.ValidateMsg(e.Message); err != nil {
        bcR.Logger.Error("Peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
        bcR.Switch.StopPeerForError(e.Src, err)
        return
    }

    if err = bc.ValidateMsg(msg); err != nil {
        logger.Error("Peer sent us invalid msg", "msg", msg, "err", err)
        bcR.Switch.StopPeerForError(src, err)
        return
    }
    bcR.Logger.Debug("Receive", "e.Src", e.Src, "chID", e.ChannelID, "msg", e.Message)

    logger.Debug("Receive", "msg", msg)

    switch msg := msg.(type) {
    switch msg := e.Message.(type) {
    case *bcproto.BlockRequest:
        bcR.respondToPeer(msg, src)
        bcR.respondToPeer(msg, e.Src)
    case *bcproto.BlockResponse:
        bi, err := types.BlockFromProto(msg.Block)
        if err != nil {
            logger.Error("Block content is invalid", "err", err)
            bcR.Switch.StopPeerForError(src, err)
            bcR.Logger.Error("Block content is invalid", "err", err)
            return
        }
        bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
        bcR.pool.AddBlock(e.Src.ID(), bi, msg.Block.Size())
    case *bcproto.StatusRequest:
        // Send peer our state.
        msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
            Height: bcR.store.Height(),
            Base:   bcR.store.Base(),
        })
        if err != nil {
            logger.Error("could not convert msg to protobuf", "err", err)
            return
        }
        src.TrySend(BlockchainChannel, msgBytes)
        p2p.TrySendEnvelopeShim(e.Src, p2p.Envelope{ //nolint: staticcheck
            ChannelID: BlockchainChannel,
            Message: &bcproto.StatusResponse{
                Height: bcR.store.Height(),
                Base:   bcR.store.Base(),
            },
        }, bcR.Logger)
    case *bcproto.StatusResponse:
        // Got a peer status. Unverified.
        bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
        bcR.pool.SetPeerRange(e.Src.ID(), msg.Base, msg.Height)
    case *bcproto.NoBlockResponse:
        logger.Debug("Peer does not have requested block", "height", msg.Height)
        bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
    default:
        logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
        bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
    }
}

func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
    msg := &bcproto.Message{}
    err := proto.Unmarshal(msgBytes, msg)
    if err != nil {
        panic(err)
    }
    uw, err := msg.Unwrap()
    if err != nil {
        panic(err)
    }
    bcR.ReceiveEnvelope(p2p.Envelope{
        ChannelID: chID,
        Src:       peer,
        Message:   uw,
    })
}

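// Editor's note: an illustrative round trip through the wrapper type that the
// legacy Receive shim above relies on; the helper itself is hypothetical. The
// sender wraps the concrete message and marshals the wrapper; the receiver
// unmarshals and unwraps before dispatching on the concrete type.
func wrapRoundTripSketch() (proto.Message, error) {
    bz, err := proto.Marshal((&bcproto.StatusRequest{}).Wrap())
    if err != nil {
        return nil, err
    }
    wrapper := &bcproto.Message{}
    if err := proto.Unmarshal(bz, wrapper); err != nil {
        return nil, err
    }
    return wrapper.Unwrap() // yields *bcproto.StatusRequest again
}
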
// Handle messages from the poolReactor telling the reactor what to do.
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
    var (
        trySyncTicker           = time.NewTicker(trySyncIntervalMS * time.Millisecond)
        statusUpdateTicker      = time.NewTicker(statusUpdateIntervalSeconds * time.Second)
        switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second)

        blocksSynced = uint64(0)
    trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
    defer trySyncTicker.Stop()

        chainID = bcR.initialState.ChainID
        state   = bcR.initialState
    statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
    defer statusUpdateTicker.Stop()

        lastHundred = time.Now()
        lastRate    = 0.0
    switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
    defer switchToConsensusTicker.Stop()

        didProcessCh = make(chan struct{}, 1)
    )
    blocksSynced := uint64(0)

    chainID := bcR.initialState.ChainID
    state := bcR.initialState

    lastHundred := time.Now()
    lastRate := 0.0

    didProcessCh := make(chan struct{}, 1)

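    // Editor's note: the restructuring above trades one shared var block for
    // per-ticker declarations so every ticker can be paired with a deferred
    // Stop. A generic sketch of that pattern (names are illustrative):
    //
    //     func tickerLoopSketch(interval time.Duration, work func(), quit <-chan struct{}) {
    //         t := time.NewTicker(interval)
    //         defer t.Stop() // release the ticker when the routine exits
    //         for {
    //             select {
    //             case <-t.C:
    //                 work()
    //             case <-quit:
    //                 return
    //             }
    //         }
    //     }
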
    go func() {
        for {
            select {

            case <-bcR.Quit():
                return

            case <-bcR.pool.Quit():
                return

            case request := <-bcR.requestsCh:
                peer := bcR.Switch.Peers().Get(request.PeerID)
                if peer == nil {
                    bcR.Logger.Debug("Can't send request: no peer", "peer_id", request.PeerID)
                    continue
                }
                msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
                if err != nil {
                    bcR.Logger.Error("could not convert BlockRequest to proto", "err", err)
                    continue
                }

                queued := peer.TrySend(BlockchainChannel, msgBytes)
                queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
                    ChannelID: BlockchainChannel,
                    Message:   &bcproto.BlockRequest{Height: request.Height},
                }, bcR.Logger)
                if !queued {
                    bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
                }

            case err := <-bcR.errorsCh:
                peer := bcR.Switch.Peers().Get(err.peerID)
                if peer != nil {
@@ -313,7 +300,8 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {

            case <-statusUpdateTicker.C:
                // ask for status updates
                go bcR.BroadcastStatusRequest()
                go bcR.BroadcastStatusRequest() //nolint: errcheck

            }
        }
    }()
@@ -321,40 +309,26 @@ func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
FOR_LOOP:
    for {
        select {

        case <-switchToConsensusTicker.C:
            var (
                height, numPending, lenRequesters = bcR.pool.GetStatus()
                outbound, inbound, _              = bcR.Switch.NumPeers()
                lastAdvance                       = bcR.pool.LastAdvance()
            )

            bcR.Logger.Debug("Consensus ticker",
                "numPending", numPending,
                "total", lenRequesters)

            switch {
            case bcR.pool.IsCaughtUp():
            height, numPending, lenRequesters := bcR.pool.GetStatus()
            outbound, inbound, _ := bcR.Switch.NumPeers()
            bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
                "outbound", outbound, "inbound", inbound)
            if bcR.pool.IsCaughtUp() {
                bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
            case time.Since(lastAdvance) > syncTimeout:
                bcR.Logger.Error(fmt.Sprintf("No progress since last advance: %v", lastAdvance))
            default:
                bcR.Logger.Info("Not caught up yet",
                    "height", height, "max_peer_height", bcR.pool.MaxPeerHeight(),
                    "num_peers", outbound+inbound,
                    "timeout_in", syncTimeout-time.Since(lastAdvance))
                continue
            }
            if err := bcR.pool.Stop(); err != nil {
                bcR.Logger.Error("Error stopping pool", "err", err)
            }
            conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
            if ok {
                conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
            }
            // else {
            // should only happen during testing
            // }

            if err := bcR.pool.Stop(); err != nil {
                bcR.Logger.Error("Error stopping pool", "err", err)
                break FOR_LOOP
            }
            conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
            if ok {
                conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
            }

            break FOR_LOOP

        case <-trySyncTicker.C: // chan time
            select {
@@ -382,61 +356,61 @@ FOR_LOOP:
                didProcessCh <- struct{}{}
            }

            var (
                firstParts         = first.MakePartSet(types.BlockPartSizeBytes)
                firstPartSetHeader = firstParts.Header()
                firstID            = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
            )

            firstParts := first.MakePartSet(types.BlockPartSizeBytes)
            firstPartSetHeader := firstParts.Header()
            firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
            // Finally, verify the first block using the second's commit
            // NOTE: we can probably make this more efficient, but note that calling
            // first.Hash() doesn't verify the tx contents, so MakePartSet() is
            // currently necessary.
            err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
            if err != nil {
                err = fmt.Errorf("invalid last commit: %w", err)
                bcR.Logger.Error(err.Error(),
                    "last_commit", second.LastCommit, "block_id", firstID, "height", first.Height)
            err := state.Validators.VerifyCommitLight(
                chainID, firstID, first.Height, second.LastCommit)

            if err == nil {
                // validate the block before we persist it
                err = bcR.blockExec.ValidateBlock(state, first)
            }

            if err != nil {
                bcR.Logger.Error("Error in validation", "err", err)
                peerID := bcR.pool.RedoRequest(first.Height)
                peer := bcR.Switch.Peers().Get(peerID)
                if peer != nil {
                    // NOTE: we've already removed the peer's request, but we still need
                    // to clean up the rest.
                    bcR.Switch.StopPeerForError(peer, err)
                    // NOTE: we've already removed the peer's request, but we
                    // still need to clean up the rest.
                    bcR.Switch.StopPeerForError(peer, fmt.Errorf("blockchainReactor validation error: %v", err))
                }

                peerID2 := bcR.pool.RedoRequest(second.Height)
                if peerID2 != peerID {
                    if peer2 := bcR.Switch.Peers().Get(peerID2); peer2 != nil {
                        bcR.Switch.StopPeerForError(peer2, err)
                    }
                peer2 := bcR.Switch.Peers().Get(peerID2)
                if peer2 != nil && peer2 != peer {
                    // NOTE: we've already removed the peer's request, but we
                    // still need to clean up the rest.
                    bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err))
                }

                continue FOR_LOOP
            } else {
                bcR.pool.PopRequest()

                // TODO: batch saves so we don't persist to disk every block
                bcR.store.SaveBlock(first, firstParts, second.LastCommit)

                // TODO: same thing for app - but we would need a way to get the hash
                // without persisting the state.
                var err error
                state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
                if err != nil {
                    // TODO This is bad, are we zombie?
                    panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
                }
                blocksSynced++

                if blocksSynced%100 == 0 {
                    lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
                    bcR.Logger.Info("Fast Sync Rate",
                        "height", bcR.pool.height, "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
                    lastHundred = time.Now()
                }
            }

            bcR.pool.PopRequest()

            // TODO: batch saves so we don't persist to disk every block
            bcR.store.SaveBlock(first, firstParts, second.LastCommit)

            // TODO: same thing for app - but we would need a way to
            // get the hash without persisting the state
            state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
            if err != nil {
                // TODO This is bad, are we zombie?
                panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
            }
            blocksSynced++

            if blocksSynced%100 == 0 {
                lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
                bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
                    "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
                lastHundred = time.Now()
            }

            continue FOR_LOOP

        case <-bcR.Quit():
@@ -446,13 +420,10 @@ FOR_LOOP:
}

// BroadcastStatusRequest broadcasts `BlockStore` base and height.
func (bcR *BlockchainReactor) BroadcastStatusRequest() {
    bm, err := bc.EncodeMsg(&bcproto.StatusRequest{})
    if err != nil {
        bcR.Logger.Error("could not convert StatusRequest to proto", "err", err)
        return
    }

    // We don't care about whether the broadcast succeeds or not.
    _ = bcR.Switch.Broadcast(BlockchainChannel, bm)
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
    bcR.Switch.BroadcastEnvelope(p2p.Envelope{
        ChannelID: BlockchainChannel,
        Message:   &bcproto.StatusRequest{},
    })
    return nil
}

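// Editor's note: a small worked illustration of the sync-rate estimate used in
// FOR_LOOP above; the helper is hypothetical. Every 100 blocks the
// instantaneous rate (100 blocks over the elapsed time) is folded into an
// exponential moving average with 0.9/0.1 weights: if the previous estimate
// was 50 blocks/s and the last hundred blocks took 4s (25 blocks/s), the new
// estimate is 0.9*50 + 0.1*25 = 47.5 blocks/s.
func emaRateSketch(lastRate float64, lastHundred time.Time) float64 {
    return 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
}
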
@@ -7,6 +7,7 @@ import (
    "testing"
    "time"

    "github.com/gogo/protobuf/proto"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

@@ -17,6 +18,7 @@ import (
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/mempool/mock"
    "github.com/tendermint/tendermint/p2p"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    "github.com/tendermint/tendermint/proxy"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
@@ -70,7 +72,9 @@ func newBlockchainReactor(

    blockDB := dbm.NewMemDB()
    stateDB := dbm.NewMemDB()
    stateStore := sm.NewStore(stateDB)
    stateStore := sm.NewStore(stateDB, sm.StoreOptions{
        DiscardABCIResponses: false,
    })
    blockStore := store.NewBlockStore(blockDB)

    state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
@@ -83,7 +87,9 @@ func newBlockchainReactor(
    // pool.height is determined from the store.
    fastSync := true
    db := dbm.NewMemDB()
    stateStore = sm.NewStore(db)
    stateStore = sm.NewStore(db, sm.StoreOptions{
        DiscardABCIResponses: false,
    })
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
        mock.Mempool{}, sm.EmptyEvidencePool{})
    if err = stateStore.Save(state); err != nil {
@@ -188,6 +194,25 @@ func TestNoBlockResponse(t *testing.T) {
    }
}

func TestLegacyReactorReceiveBasic(t *testing.T) {
    config = cfg.ResetTestRoot("blockchain_reactor_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(1, false, 30)
    reactor := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 10).reactor
    peer := p2p.CreateRandomPeer(false)

    reactor.InitPeer(peer)
    reactor.AddPeer(peer)
    m := &bcproto.StatusRequest{}
    wm := m.Wrap()
    msg, err := proto.Marshal(wm)
    assert.NoError(t, err)

    assert.NotPanics(t, func() {
        reactor.Receive(BlockchainChannel, peer, msg)
    })
}

// NOTE: This is too hard to test without
// an easy way to add a test peer to the switch
// or without significant refactoring of the module.

211  blockchain/v1/peer.go  Normal file
@@ -0,0 +1,211 @@
package v1

import (
    "fmt"
    "math"
    "time"

    flow "github.com/tendermint/tendermint/libs/flowrate"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

//--------
// Peer

// BpPeerParams stores the peer parameters that are used when creating a peer.
type BpPeerParams struct {
    timeout     time.Duration
    minRecvRate int64
    sampleRate  time.Duration
    windowSize  time.Duration
}

// BpPeer is the datastructure associated with a fast sync peer.
type BpPeer struct {
    logger log.Logger
    ID     p2p.ID

    Base                    int64                  // the peer reported base
    Height                  int64                  // the peer reported height
    NumPendingBlockRequests int                    // number of requests still waiting for block responses
    blocks                  map[int64]*types.Block // blocks received or expected to be received from this peer
    blockResponseTimer      *time.Timer
    recvMonitor             *flow.Monitor
    params                  *BpPeerParams // parameters for timer and monitor

    onErr func(err error, peerID p2p.ID) // function to call on error
}

// NewBpPeer creates a new peer.
func NewBpPeer(peerID p2p.ID, base int64, height int64,
    onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer {

    if params == nil {
        params = BpPeerDefaultParams()
    }
    return &BpPeer{
        ID:     peerID,
        Base:   base,
        Height: height,
        blocks: make(map[int64]*types.Block, maxRequestsPerPeer),
        logger: log.NewNopLogger(),
        onErr:  onErr,
        params: params,
    }
}

// String returns a string representation of a peer.
func (peer *BpPeer) String() string {
    return fmt.Sprintf("peer: %v height: %v pending: %v", peer.ID, peer.Height, peer.NumPendingBlockRequests)
}

// SetLogger sets the logger of the peer.
func (peer *BpPeer) SetLogger(l log.Logger) {
    peer.logger = l
}

// Cleanup performs cleanup of the peer, removes blocks, requests, stops timer and monitor.
func (peer *BpPeer) Cleanup() {
    if peer.blockResponseTimer != nil {
        peer.blockResponseTimer.Stop()
    }
    if peer.NumPendingBlockRequests != 0 {
        peer.logger.Info("peer with pending requests is being cleaned", "peer", peer.ID)
    }
    if len(peer.blocks)-peer.NumPendingBlockRequests != 0 {
        peer.logger.Info("peer with pending blocks is being cleaned", "peer", peer.ID)
    }
    for h := range peer.blocks {
        delete(peer.blocks, h)
    }
    peer.NumPendingBlockRequests = 0
    peer.recvMonitor = nil
}

// BlockAtHeight returns the block at a given height if available and errMissingBlock otherwise.
func (peer *BpPeer) BlockAtHeight(height int64) (*types.Block, error) {
    block, ok := peer.blocks[height]
    if !ok {
        return nil, errMissingBlock
    }
    if block == nil {
        return nil, errMissingBlock
    }
    return peer.blocks[height], nil
}

// AddBlock adds a block at peer level. Block must be non-nil and recvSize non-negative.
// The peer must have a pending request for this block.
func (peer *BpPeer) AddBlock(block *types.Block, recvSize int) error {
    if block == nil || recvSize < 0 {
        panic("bad parameters")
    }
    existingBlock, ok := peer.blocks[block.Height]
    if !ok {
        peer.logger.Error("unsolicited block", "blockHeight", block.Height, "peer", peer.ID)
        return errMissingBlock
    }
    if existingBlock != nil {
        peer.logger.Error("already have a block for height", "height", block.Height)
        return errDuplicateBlock
    }
    if peer.NumPendingBlockRequests == 0 {
        panic("peer does not have pending requests")
    }
    peer.blocks[block.Height] = block
    peer.NumPendingBlockRequests--
    if peer.NumPendingBlockRequests == 0 {
        peer.stopMonitor()
        peer.stopBlockResponseTimer()
    } else {
        peer.recvMonitor.Update(recvSize)
        peer.resetBlockResponseTimer()
    }
    return nil
}

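// Editor's note: a hedged usage sketch of the bookkeeping above; the helper
// and the recv size are made up. RequestSent must precede AddBlock: without a
// matching pending request AddBlock returns errMissingBlock (or panics if the
// peer has no pending requests at all), and a second AddBlock for the same
// height returns errDuplicateBlock.
func bpPeerUsageSketch(peer *BpPeer, block *types.Block) (*types.Block, error) {
    peer.RequestSent(block.Height) // nil placeholder; timer and monitor start
    if err := peer.AddBlock(block, 1024); err != nil {
        return nil, err
    }
    return peer.BlockAtHeight(block.Height)
}
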
// RemoveBlock removes the block of a given height.
func (peer *BpPeer) RemoveBlock(height int64) {
    delete(peer.blocks, height)
}

// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
func (peer *BpPeer) RequestSent(height int64) {
    peer.blocks[height] = nil

    if peer.NumPendingBlockRequests == 0 {
        peer.startMonitor()
        peer.resetBlockResponseTimer()
    }
    peer.NumPendingBlockRequests++
}

// CheckRate verifies that the response rate of the peer is acceptable (higher than the minimum allowed).
func (peer *BpPeer) CheckRate() error {
    if peer.NumPendingBlockRequests == 0 {
        return nil
    }
    curRate := peer.recvMonitor.Status().CurRate
    // curRate can be 0 on start
    if curRate != 0 && curRate < peer.params.minRecvRate {
        err := errSlowPeer
        peer.logger.Error("SendTimeout", "peer", peer,
            "reason", err,
            "curRate", fmt.Sprintf("%d KB/s", curRate/1024),
            "minRate", fmt.Sprintf("%d KB/s", peer.params.minRecvRate/1024))
        return err
    }
    return nil
}

func (peer *BpPeer) onTimeout() {
    peer.onErr(errNoPeerResponse, peer.ID)
}

func (peer *BpPeer) stopMonitor() {
    peer.recvMonitor.Done()
    peer.recvMonitor = nil
}

func (peer *BpPeer) startMonitor() {
    peer.recvMonitor = flow.New(peer.params.sampleRate, peer.params.windowSize)
    initialValue := float64(peer.params.minRecvRate) * math.E
    peer.recvMonitor.SetREMA(initialValue)
}

func (peer *BpPeer) resetBlockResponseTimer() {
    if peer.blockResponseTimer == nil {
        peer.blockResponseTimer = time.AfterFunc(peer.params.timeout, peer.onTimeout)
    } else {
        peer.blockResponseTimer.Reset(peer.params.timeout)
    }
}

func (peer *BpPeer) stopBlockResponseTimer() bool {
    if peer.blockResponseTimer == nil {
        return false
    }
    return peer.blockResponseTimer.Stop()
}

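// Editor's note: an illustrative reduction of the lazy timer handling above;
// the helper is hypothetical. time.AfterFunc allocates the timer on first use
// and later calls only Reset it, so a peer that never receives a request never
// owns a timer at all.
func lazyAfterFuncSketch(t **time.Timer, d time.Duration, fire func()) {
    if *t == nil {
        *t = time.AfterFunc(d, fire)
    } else {
        (*t).Reset(d)
    }
}
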
// BpPeerDefaultParams returns the default peer parameters.
func BpPeerDefaultParams() *BpPeerParams {
    return &BpPeerParams{
        // Timeout for a peer to respond to a block request.
        timeout: 15 * time.Second,

        // Minimum recv rate to ensure we're receiving blocks from a peer fast
        // enough. If a peer is not sending data at least at that rate, we
        // consider them to have timed out and we disconnect.
        //
        // Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s,
        // sending data across the Atlantic ~ 7.5 KB/s.
        minRecvRate: int64(7680),

        // Monitor parameters
        sampleRate: time.Second,
        windowSize: 40 * time.Second,
    }
}
280  blockchain/v1/peer_test.go  Normal file
@@ -0,0 +1,280 @@
package v1

import (
    "sync"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/libs/log"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

func TestPeerMonitor(t *testing.T) {
    peer := NewBpPeer(
        p2p.ID(tmrand.Str(12)), 0, 10,
        func(err error, _ p2p.ID) {},
        nil)
    peer.SetLogger(log.TestingLogger())
    peer.startMonitor()
    assert.NotNil(t, peer.recvMonitor)
    peer.stopMonitor()
    assert.Nil(t, peer.recvMonitor)
}

func TestPeerResetBlockResponseTimer(t *testing.T) {
    var (
        numErrFuncCalls int        // number of calls to the errFunc
        lastErr         error      // last generated error
        peerTestMtx     sync.Mutex // the above variables are also modified from the timer handler goroutine
    )
    params := &BpPeerParams{timeout: 20 * time.Millisecond}

    peer := NewBpPeer(
        p2p.ID(tmrand.Str(12)), 0, 10,
        func(err error, _ p2p.ID) {
            peerTestMtx.Lock()
            defer peerTestMtx.Unlock()
            lastErr = err
            numErrFuncCalls++
        },
        params)

    peer.SetLogger(log.TestingLogger())
    checkByStoppingPeerTimer(t, peer, false)

    // initial reset call with peer having a nil timer
    peer.resetBlockResponseTimer()
    assert.NotNil(t, peer.blockResponseTimer)
    // make sure timer is running and stop it
    checkByStoppingPeerTimer(t, peer, true)

    // reset with running timer
    peer.resetBlockResponseTimer()
    time.Sleep(5 * time.Millisecond)
    peer.resetBlockResponseTimer()
    assert.NotNil(t, peer.blockResponseTimer)

    // let the timer expire and ...
    time.Sleep(50 * time.Millisecond)
    // ... check timer is not running
    checkByStoppingPeerTimer(t, peer, false)

    peerTestMtx.Lock()
    // ... check errNoPeerResponse has been sent
    assert.Equal(t, 1, numErrFuncCalls)
    assert.Equal(t, lastErr, errNoPeerResponse)
    peerTestMtx.Unlock()
}

func TestPeerRequestSent(t *testing.T) {
    params := &BpPeerParams{timeout: 2 * time.Millisecond}

    peer := NewBpPeer(
        p2p.ID(tmrand.Str(12)), 0, 10,
        func(err error, _ p2p.ID) {},
        params)

    peer.SetLogger(log.TestingLogger())

    peer.RequestSent(1)
    assert.NotNil(t, peer.recvMonitor)
    assert.NotNil(t, peer.blockResponseTimer)
    assert.Equal(t, 1, peer.NumPendingBlockRequests)

    peer.RequestSent(1)
    assert.NotNil(t, peer.recvMonitor)
    assert.NotNil(t, peer.blockResponseTimer)
    assert.Equal(t, 2, peer.NumPendingBlockRequests)
}

func TestPeerGetAndRemoveBlock(t *testing.T) {
    peer := NewBpPeer(
        p2p.ID(tmrand.Str(12)), 0, 100,
        func(err error, _ p2p.ID) {},
        nil)

    // Change peer height
    peer.Height = int64(10)
    assert.Equal(t, int64(10), peer.Height)

    // request some blocks and receive a few of them
    for i := 1; i <= 10; i++ {
        peer.RequestSent(int64(i))
        if i > 5 {
            // only receive blocks 1..5
            continue
        }
        _ = peer.AddBlock(makeSmallBlock(i), 10)
    }

    tests := []struct {
        name         string
        height       int64
        wantErr      error
        blockPresent bool
    }{
        {"no request", 100, errMissingBlock, false},
        {"no block", 6, errMissingBlock, false},
        {"block 1 present", 1, nil, true},
        {"block max present", 5, nil, true},
    }

    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            // try to get the block
            b, err := peer.BlockAtHeight(tt.height)
            assert.Equal(t, tt.wantErr, err)
            assert.Equal(t, tt.blockPresent, b != nil)

            // remove the block
            peer.RemoveBlock(tt.height)
            _, err = peer.BlockAtHeight(tt.height)
            assert.Equal(t, errMissingBlock, err)
        })
    }
}

func TestPeerAddBlock(t *testing.T) {
    peer := NewBpPeer(
        p2p.ID(tmrand.Str(12)), 0, 100,
        func(err error, _ p2p.ID) {},
        nil)

    // request some blocks, receive one
    for i := 1; i <= 10; i++ {
        peer.RequestSent(int64(i))
        if i == 5 {
            // receive block 5
            _ = peer.AddBlock(makeSmallBlock(i), 10)
        }
    }

    tests := []struct {
        name         string
        height       int64
        wantErr      error
        blockPresent bool
    }{
        {"no request", 50, errMissingBlock, false},
        {"duplicate block", 5, errDuplicateBlock, true},
        {"block 1 successfully received", 1, nil, true},
        {"block max successfully received", 10, nil, true},
    }

    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            // try to add the block
            err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10)
            assert.Equal(t, tt.wantErr, err)
            _, err = peer.BlockAtHeight(tt.height)
            assert.Equal(t, tt.blockPresent, err == nil)
        })
    }
}

func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) {
    params := &BpPeerParams{timeout: 10 * time.Millisecond}
    var (
        numErrFuncCalls int        // number of calls to the onErr function
        lastErr         error      // last generated error
        peerTestMtx     sync.Mutex // the above variables are also modified from the timer handler goroutine
    )

    peer := NewBpPeer(
        p2p.ID(tmrand.Str(12)), 0, 10,
        func(err error, _ p2p.ID) {
            peerTestMtx.Lock()
            defer peerTestMtx.Unlock()
            lastErr = err
            numErrFuncCalls++
        },
        params)

    peer.SetLogger(log.TestingLogger())

    peer.RequestSent(1)
    time.Sleep(50 * time.Millisecond)
    // timer should have expired by now, check that the on error function was called
    peerTestMtx.Lock()
    assert.Equal(t, 1, numErrFuncCalls)
    assert.Equal(t, errNoPeerResponse, lastErr)
    peerTestMtx.Unlock()
}

func TestPeerCheckRate(t *testing.T) {
    params := &BpPeerParams{
        timeout:     time.Second,
        minRecvRate: int64(100), // 100 bytes/sec exponential moving average
    }
    peer := NewBpPeer(
        p2p.ID(tmrand.Str(12)), 0, 10,
        func(err error, _ p2p.ID) {},
        params)
    peer.SetLogger(log.TestingLogger())

    require.Nil(t, peer.CheckRate())

    for i := 0; i < 40; i++ {
        peer.RequestSent(int64(i))
    }

    // monitor starts with a higher rEMA (~ 2*minRecvRate), wait for it to go down
    time.Sleep(900 * time.Millisecond)

    // normal peer - send a bit more than 100 bytes/sec, > 10 bytes/100msec, check peer is not considered slow
    for i := 0; i < 10; i++ {
        _ = peer.AddBlock(makeSmallBlock(i), 11)
        time.Sleep(100 * time.Millisecond)
        require.Nil(t, peer.CheckRate())
    }

    // slow peer - send a bit less than 10 bytes/100msec
    for i := 10; i < 20; i++ {
        _ = peer.AddBlock(makeSmallBlock(i), 9)
        time.Sleep(100 * time.Millisecond)
    }
    // check peer is considered slow
    assert.Equal(t, errSlowPeer, peer.CheckRate())
}

func TestPeerCleanup(t *testing.T) {
    params := &BpPeerParams{timeout: 2 * time.Millisecond}

    peer := NewBpPeer(
        p2p.ID(tmrand.Str(12)), 0, 10,
        func(err error, _ p2p.ID) {},
        params)
    peer.SetLogger(log.TestingLogger())

    assert.Nil(t, peer.blockResponseTimer)
    peer.RequestSent(1)
    assert.NotNil(t, peer.blockResponseTimer)

    peer.Cleanup()
    checkByStoppingPeerTimer(t, peer, false)
}

// Check if peer timer is running or not (a running timer can be successfully stopped).
// Note: stops the timer.
func checkByStoppingPeerTimer(t *testing.T, peer *BpPeer, running bool) {
    assert.NotPanics(t, func() {
        stopped := peer.stopBlockResponseTimer()
        if running {
            assert.True(t, stopped)
        } else {
            assert.False(t, stopped)
        }
    })
}

func makeSmallBlock(height int) *types.Block {
    return types.MakeBlock(int64(height), []types.Tx{types.Tx("foo")}, nil, nil)
}
370  blockchain/v1/pool.go  Normal file
@@ -0,0 +1,370 @@
package v1

import (
    "sort"

    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
    logger log.Logger
    // Set of peers that have sent status responses, with height bigger than pool.Height
    peers map[p2p.ID]*BpPeer
    // Set of block heights and the corresponding peers from where a block response is expected or has been received.
    blocks map[int64]p2p.ID

    plannedRequests   map[int64]struct{} // list of blocks to be assigned peers for blockRequest
    nextRequestHeight int64              // next height to be added to plannedRequests

    Height        int64 // height of next block to execute
    MaxPeerHeight int64 // maximum height of all peers
    toBcR         bcReactor
}

// NewBlockPool creates a new BlockPool.
func NewBlockPool(height int64, toBcR bcReactor) *BlockPool {
    return &BlockPool{
        Height:            height,
        MaxPeerHeight:     0,
        peers:             make(map[p2p.ID]*BpPeer),
        blocks:            make(map[int64]p2p.ID),
        plannedRequests:   make(map[int64]struct{}),
        nextRequestHeight: height,
        toBcR:             toBcR,
    }
}

// SetLogger sets the logger of the pool.
func (pool *BlockPool) SetLogger(l log.Logger) {
    pool.logger = l
}

// ReachedMaxHeight checks if the pool has reached the maximum peer height.
func (pool *BlockPool) ReachedMaxHeight() bool {
    return pool.Height >= pool.MaxPeerHeight
}

func (pool *BlockPool) rescheduleRequest(peerID p2p.ID, height int64) {
    pool.logger.Info("reschedule requests made to peer for height", "peerID", peerID, "height", height)
    pool.plannedRequests[height] = struct{}{}
    delete(pool.blocks, height)
    pool.peers[peerID].RemoveBlock(height)
}

// Updates the pool's max height. If no peers are left MaxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
    var newMax int64
    for _, peer := range pool.peers {
        peerHeight := peer.Height
        if peerHeight > newMax {
            newMax = peerHeight
        }
    }
    pool.MaxPeerHeight = newMax
}

// UpdatePeer adds a new peer or updates an existing peer with a new base and height.
// If the peer's height is lower than the pool's, the peer is not added.
func (pool *BlockPool) UpdatePeer(peerID p2p.ID, base int64, height int64) error {

    peer := pool.peers[peerID]

    if peer == nil {
        if height < pool.Height {
            pool.logger.Info("Peer height too small",
                "peer", peerID, "height", height, "fsm_height", pool.Height)
            return errPeerTooShort
        }
        // Add new peer.
        peer = NewBpPeer(peerID, base, height, pool.toBcR.sendPeerError, nil)
        peer.SetLogger(pool.logger.With("peer", peerID))
        pool.peers[peerID] = peer
        pool.logger.Info("added peer", "peerID", peerID, "base", base, "height", height, "num_peers", len(pool.peers))
    } else {
        // Check if peer is lowering its height. This is not allowed.
        if height < peer.Height {
            pool.RemovePeer(peerID, errPeerLowersItsHeight)
            return errPeerLowersItsHeight
        }
        // Update existing peer.
        peer.Base = base
        peer.Height = height
    }

    // Update the pool's MaxPeerHeight if needed.
    pool.updateMaxPeerHeight()

    return nil
}

// Cleans and deletes the peer. Recomputes the max peer height.
func (pool *BlockPool) deletePeer(peer *BpPeer) {
    if peer == nil {
        return
    }
    peer.Cleanup()
    delete(pool.peers, peer.ID)

    if peer.Height == pool.MaxPeerHeight {
        pool.updateMaxPeerHeight()
    }
}

// RemovePeer removes the blocks and requests from the peer, reschedules them and deletes the peer.
func (pool *BlockPool) RemovePeer(peerID p2p.ID, err error) {
    peer := pool.peers[peerID]
    if peer == nil {
        return
    }
    pool.logger.Info("removing peer", "peerID", peerID, "error", err)

    // Reschedule the block requests made to the peer, or received and not processed yet.
    // Note that some of the requests may be removed further down.
    for h := range pool.peers[peerID].blocks {
        pool.rescheduleRequest(peerID, h)
    }

    oldMaxPeerHeight := pool.MaxPeerHeight
    // Delete the peer. This operation may result in the pool's MaxPeerHeight being lowered.
    pool.deletePeer(peer)

    // Check if the pool's MaxPeerHeight has been lowered.
    // This may happen if the tallest peer has been removed.
    if oldMaxPeerHeight > pool.MaxPeerHeight {
        // Remove any planned requests for heights over the new MaxPeerHeight.
        for h := range pool.plannedRequests {
            if h > pool.MaxPeerHeight {
                delete(pool.plannedRequests, h)
            }
        }
        // Adjust the nextRequestHeight to the new max plus one.
        if pool.nextRequestHeight > pool.MaxPeerHeight {
            pool.nextRequestHeight = pool.MaxPeerHeight + 1
        }
    }
}

func (pool *BlockPool) removeShortPeers() {
    for _, peer := range pool.peers {
        if peer.Height < pool.Height {
            pool.RemovePeer(peer.ID, nil)
        }
    }
}

func (pool *BlockPool) removeBadPeers() {
    pool.removeShortPeers()
    for _, peer := range pool.peers {
        if err := peer.CheckRate(); err != nil {
            pool.RemovePeer(peer.ID, err)
            pool.toBcR.sendPeerError(err, peer.ID)
        }
    }
}

// MakeNextRequests creates more requests if the block pool is running low.
func (pool *BlockPool) MakeNextRequests(maxNumRequests int) {
    heights := pool.makeRequestBatch(maxNumRequests)
    if len(heights) != 0 {
        pool.logger.Info("makeNextRequests will make following requests",
            "number", len(heights), "heights", heights)
    }

    for _, height := range heights {
        h := int64(height)
        if !pool.sendRequest(h) {
            // If a good peer was not found for sending the request at height h then return,
            // as it shouldn't be possible to find a peer for h+1.
            return
        }
        delete(pool.plannedRequests, h)
    }
}

// Makes a batch of requests sorted by height such that the block pool has up to maxNumRequests entries.
func (pool *BlockPool) makeRequestBatch(maxNumRequests int) []int {
    pool.removeBadPeers()
    // At this point pool.requests may include heights for requests to be redone due to removal of peers:
    // - peers timed out or were removed by switch
    // - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1
    // Determine the number of requests needed by subtracting the number of requests already made from the
    // maximum allowed.
    numNeeded := maxNumRequests - len(pool.blocks)
    for len(pool.plannedRequests) < numNeeded {
        if pool.nextRequestHeight > pool.MaxPeerHeight {
            break
        }
        pool.plannedRequests[pool.nextRequestHeight] = struct{}{}
        pool.nextRequestHeight++
    }

    heights := make([]int, 0, len(pool.plannedRequests))
    for k := range pool.plannedRequests {
        heights = append(heights, int(k))
    }
    sort.Ints(heights)
    return heights
}

func (pool *BlockPool) sendRequest(height int64) bool {
    for _, peer := range pool.peers {
        if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
            continue
        }
        if peer.Base > height || peer.Height < height {
            continue
        }

        err := pool.toBcR.sendBlockRequest(peer.ID, height)
        if err == errNilPeerForBlockRequest {
            // Switch does not have this peer, remove it and continue to look for another peer.
            pool.logger.Error("switch does not have peer, removing peer selected for height", "peer",
                peer.ID, "height", height)
            pool.RemovePeer(peer.ID, err)
            continue
        }

        if err == errSendQueueFull {
            pool.logger.Error("peer queue is full", "peer", peer.ID, "height", height)
            continue
        }

        pool.logger.Info("assigned request to peer", "peer", peer.ID, "height", height)

        pool.blocks[height] = peer.ID
        peer.RequestSent(height)

        return true
    }
    pool.logger.Error("could not find peer to send request for block at height", "height", height)
    return false
}

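// Editor's note: the peer-selection predicate buried in sendRequest above,
// pulled out as a hypothetical helper for readability. A peer qualifies for a
// height only if it still has request capacity and its reported [Base, Height]
// range covers the requested block.
func peerCanServeSketch(peer *BpPeer, height int64) bool {
    if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
        return false // at capacity
    }
    return peer.Base <= height && height <= peer.Height
}
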
// AddBlock validates that the block comes from the peer it was expected from and stores it in the 'blocks' map.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) error {
    peer, ok := pool.peers[peerID]
    if !ok {
        pool.logger.Error("block from unknown peer", "height", block.Height, "peer", peerID)
        return errBadDataFromPeer
    }
    if wantPeerID, ok := pool.blocks[block.Height]; ok && wantPeerID != peerID {
        pool.logger.Error("block received from wrong peer", "height", block.Height,
            "peer", peerID, "expected_peer", wantPeerID)
        return errBadDataFromPeer
    }

    return peer.AddBlock(block, blockSize)
}

// BlockData stores the peer responsible for delivering a block and the actual block if delivered.
type BlockData struct {
    block *types.Block
    peer  *BpPeer
}

// BlockAndPeerAtHeight retrieves the block and delivery peer at specified height.
// Returns errMissingBlock if a block was not found.
func (pool *BlockPool) BlockAndPeerAtHeight(height int64) (bData *BlockData, err error) {
    peerID := pool.blocks[height]
    peer := pool.peers[peerID]
    if peer == nil {
        return nil, errMissingBlock
    }

    block, err := peer.BlockAtHeight(height)
    if err != nil {
        return nil, err
    }

    return &BlockData{peer: peer, block: block}, nil
}

// FirstTwoBlocksAndPeers returns the blocks and the delivery peers at pool's height H and H+1.
func (pool *BlockPool) FirstTwoBlocksAndPeers() (first, second *BlockData, err error) {
    first, err = pool.BlockAndPeerAtHeight(pool.Height)
    second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
    if err == nil {
        err = err2
    }
    return
}

// InvalidateFirstTwoBlocks removes the peers that sent us the first two blocks, blocks are removed by RemovePeer().
func (pool *BlockPool) InvalidateFirstTwoBlocks(err error) {
    first, err1 := pool.BlockAndPeerAtHeight(pool.Height)
    second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)

    if err1 == nil {
        pool.RemovePeer(first.peer.ID, err)
    }
    if err2 == nil {
        pool.RemovePeer(second.peer.ID, err)
    }
}

// ProcessedCurrentHeightBlock performs cleanup after a block is processed. It removes block at pool height and
// the peers that are now short.
func (pool *BlockPool) ProcessedCurrentHeightBlock() {
    peerID, peerOk := pool.blocks[pool.Height]
    if peerOk {
        pool.peers[peerID].RemoveBlock(pool.Height)
    }
    delete(pool.blocks, pool.Height)
    pool.logger.Debug("removed block at height", "height", pool.Height)
    pool.Height++
    pool.removeShortPeers()
}

// RemovePeerAtCurrentHeights checks if a block at pool's height H exists and if not, it removes the
// delivery peer and returns. If a block at height H exists then the check and peer removal is done for H+1.
// This function is called when the FSM is not able to make progress for some time.
// This happens if either the block H or H+1 have not been delivered.
func (pool *BlockPool) RemovePeerAtCurrentHeights(err error) {
    peerID := pool.blocks[pool.Height]
    peer, ok := pool.peers[peerID]
    if ok {
        if _, err := peer.BlockAtHeight(pool.Height); err != nil {
            pool.logger.Info("remove peer that hasn't sent block at pool.Height",
                "peer", peerID, "height", pool.Height)
            pool.RemovePeer(peerID, err)
            return
        }
    }
    peerID = pool.blocks[pool.Height+1]
    peer, ok = pool.peers[peerID]
    if ok {
        if _, err := peer.BlockAtHeight(pool.Height + 1); err != nil {
            pool.logger.Info("remove peer that hasn't sent block at pool.Height+1",
                "peer", peerID, "height", pool.Height+1)
            pool.RemovePeer(peerID, err)
            return
        }
    }
}

// Cleanup performs pool and peer cleanup.
func (pool *BlockPool) Cleanup() {
    for id, peer := range pool.peers {
        peer.Cleanup()
        delete(pool.peers, id)
    }
    pool.plannedRequests = make(map[int64]struct{})
    pool.blocks = make(map[int64]p2p.ID)
    pool.nextRequestHeight = 0
    pool.Height = 0
    pool.MaxPeerHeight = 0
}

// NumPeers returns the number of peers in the pool.
func (pool *BlockPool) NumPeers() int {
    return len(pool.peers)
}

// NeedsBlocks returns true if more blocks are required.
func (pool *BlockPool) NeedsBlocks() bool {
    return len(pool.blocks) < maxNumRequests
}
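
// Editor's note: a hedged end-to-end sketch of how a reactor might drive the
// pool above, assuming the block answers a request previously assigned to
// peerID; the helper and the literal recv size are illustrative only.
func poolDriveSketch(pool *BlockPool, peerID p2p.ID, blk *types.Block) error {
    if err := pool.UpdatePeer(peerID, 0, blk.Height); err != nil {
        return err // e.g. errPeerTooShort or errPeerLowersItsHeight
    }
    if pool.NeedsBlocks() {
        pool.MakeNextRequests(maxNumRequests) // schedule and assign heights to peers
    }
    if err := pool.AddBlock(peerID, blk, 1024); err != nil {
        return err // unsolicited block or wrong delivery peer
    }
    pool.ProcessedCurrentHeightBlock() // advance pool.Height after execution
    return nil
}
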
691  blockchain/v1/pool_test.go  Normal file
@@ -0,0 +1,691 @@
package v1

import (
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

type testPeer struct {
    id     p2p.ID
    base   int64
    height int64
}

type testBcR struct {
    logger log.Logger
}

type testValues struct {
    numRequestsSent int
}

var testResults testValues

func resetPoolTestResults() {
    testResults.numRequestsSent = 0
}

func (testR *testBcR) sendPeerError(err error, peerID p2p.ID) {
}

func (testR *testBcR) sendStatusRequest() {
}

func (testR *testBcR) sendBlockRequest(peerID p2p.ID, height int64) error {
    testResults.numRequestsSent++
    return nil
}

func (testR *testBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
}

func (testR *testBcR) switchToConsensus() {
}

func newTestBcR() *testBcR {
    testBcR := &testBcR{logger: log.TestingLogger()}
    return testBcR
}

type tPBlocks struct {
    id     p2p.ID
    create bool
}

// Makes a block pool with specified current height, list of peers, block requests and block responses.
func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64]tPBlocks) *BlockPool {
    bPool := NewBlockPool(height, bcr)
    bPool.SetLogger(bcr.logger)

    txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

    var maxH int64
    for _, p := range peers {
        if p.Height > maxH {
            maxH = p.Height
        }
        bPool.peers[p.ID] = NewBpPeer(p.ID, p.Base, p.Height, bcr.sendPeerError, nil)
        bPool.peers[p.ID].SetLogger(bcr.logger)
    }
    bPool.MaxPeerHeight = maxH
    for h, p := range blocks {
        bPool.blocks[h] = p.id
        bPool.peers[p.id].RequestSent(h)
        if p.create {
            // simulate that a block at height h has been received
            _ = bPool.peers[p.id].AddBlock(types.MakeBlock(h, txs, nil, nil), 100)
        }
    }
    return bPool
}

func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2p.ID]*BpPeer) {
    assert.Equal(t, len(set1), len(set2))
    for peerID, peer1 := range set1 {
        peer2 := set2[peerID]
        assert.NotNil(t, peer2)
        assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests)
        assert.Equal(t, peer1.Height, peer2.Height)
        assert.Equal(t, peer1.Base, peer2.Base)
        assert.Equal(t, len(peer1.blocks), len(peer2.blocks))
        for h, block1 := range peer1.blocks {
            block2 := peer2.blocks[h]
            // block1 and block2 could be nil if a request was made but no block was received
            assert.Equal(t, block1, block2)
        }
    }
}

func assertBlockPoolEquivalent(t *testing.T, poolWanted, pool *BlockPool) {
    assert.Equal(t, poolWanted.blocks, pool.blocks)
    assertPeerSetsEquivalent(t, poolWanted.peers, pool.peers)
    assert.Equal(t, poolWanted.MaxPeerHeight, pool.MaxPeerHeight)
    assert.Equal(t, poolWanted.Height, pool.Height)
}

func TestBlockPoolUpdatePeer(t *testing.T) {
    testBcR := newTestBcR()

    tests := []struct {
        name       string
        pool       *BlockPool
        args       testPeer
        poolWanted *BlockPool
        errWanted  error
    }{
        {
            name:       "add a first short peer",
            pool:       makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
            args:       testPeer{"P1", 0, 50},
            errWanted:  errPeerTooShort,
            poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
        },
        {
            name:       "add a first good peer",
            pool:       makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
            args:       testPeer{"P1", 0, 101},
            poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}),
        },
        {
            name:       "add a first good peer with base",
            pool:       makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
            args:       testPeer{"P1", 10, 101},
            poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Base: 10, Height: 101}}, map[int64]tPBlocks{}),
        },
        {
            name:       "increase the height of P1 from 120 to 123",
            pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
            args:       testPeer{"P1", 0, 123},
            poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}),
        },
        {
            name:       "decrease the height of P1 from 120 to 110",
            pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
            args:       testPeer{"P1", 0, 110},
            errWanted:  errPeerLowersItsHeight,
            poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
        },
        {
            name: "decrease the height of P1 from 105 to 102 with blocks",
            pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}},
                map[int64]tPBlocks{
                    100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}),
            args:      testPeer{"P1", 0, 102},
            errWanted: errPeerLowersItsHeight,
            poolWanted: makeBlockPool(testBcR, 100, []BpPeer{},
                map[int64]tPBlocks{}),
        },
    }

    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            pool := tt.pool
            err := pool.UpdatePeer(tt.args.id, tt.args.base, tt.args.height)
            assert.Equal(t, tt.errWanted, err)
            assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks)
            assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers)
            assert.Equal(t, tt.poolWanted.MaxPeerHeight, tt.pool.MaxPeerHeight)
        })
    }
}

func TestBlockPoolRemovePeer(t *testing.T) {
    testBcR := newTestBcR()

    type args struct {
        peerID p2p.ID
        err    error
    }

    tests := []struct {
        name       string
        pool       *BlockPool
        args       args
        poolWanted *BlockPool
    }{
        {
            name:       "attempt to delete non-existing peer",
            pool:       makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
            args:       args{"P99", nil},
            poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
        },
        {
            name: "delete the only peer without blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers without blocks",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
|
||||
map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers without blocks",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
|
||||
map[int64]tPBlocks{}),
|
||||
args: args{"P2", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 200}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 200}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 110}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 110}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.RemovePeer(tt.args.peerID, tt.args.err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemoveShortPeers(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "no short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "one short peer",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 90}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "all short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 90}, {ID: "P2", Height: 91}, {ID: "P3", Height: 92}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
pool.removeShortPeers()
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolSendRequestBatch(t *testing.T) {
|
||||
type testPeerResult struct {
|
||||
id p2p.ID
|
||||
numPendingBlockRequests int
|
||||
}
|
||||
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
maxRequestsPerPeer int
|
||||
expRequests map[int64]bool
|
||||
expRequestsSent int
|
||||
expPeerResults []testPeerResult
|
||||
}{
|
||||
{
|
||||
name: "one peer - send up to maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expRequestsSent: 2,
|
||||
expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}},
|
||||
},
|
||||
{
|
||||
name: "multiple peers - stops at gap between height and base",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{
|
||||
{ID: "P1", Base: 1, Height: 12},
|
||||
{ID: "P2", Base: 15, Height: 100},
|
||||
}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 10,
|
||||
expRequests: map[int64]bool{10: true, 11: true, 12: true},
|
||||
expRequestsSent: 3,
|
||||
expPeerResults: []testPeerResult{
|
||||
{id: "P1", numPendingBlockRequests: 3},
|
||||
{id: "P2", numPendingBlockRequests: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "n peers - send n*maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expRequestsSent: 4,
|
||||
expPeerResults: []testPeerResult{
|
||||
{id: "P1", numPendingBlockRequests: 2},
|
||||
{id: "P2", numPendingBlockRequests: 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
resetPoolTestResults()
|
||||
|
||||
var pool = tt.pool
|
||||
maxRequestsPerPeer = tt.maxRequestsPerPeer
|
||||
pool.MakeNextRequests(10)
|
||||
|
||||
assert.Equal(t, tt.expRequestsSent, testResults.numRequestsSent)
|
||||
for _, tPeer := range tt.expPeerResults {
|
||||
var peer = pool.peers[tPeer.id]
|
||||
assert.NotNil(t, peer)
|
||||
assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolAddBlock(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
block *types.Block
|
||||
blockSize int
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args args
|
||||
poolWanted *BlockPool
|
||||
errWanted error
|
||||
}{
|
||||
{name: "block from unknown peer",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
args: args{
|
||||
peerID: "P2",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
errWanted: errBadDataFromPeer,
|
||||
},
|
||||
{name: "unexpected block 11 from known peer - waiting for 10",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(11), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{name: "unexpected block 10 from known peer - already have 10",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
|
||||
errWanted: errDuplicateBlock,
|
||||
},
|
||||
{name: "unexpected block 10 from known peer P2 - expected 10 to come from P1",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P2",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
errWanted: errBadDataFromPeer,
|
||||
},
|
||||
{name: "expected block from known peer",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}}),
|
||||
errWanted: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize)
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
firstWanted int64
|
||||
secondWanted int64
|
||||
errWanted error
|
||||
}{
|
||||
{
|
||||
name: "both blocks missing",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{
|
||||
name: "second block missing",
|
||||
pool: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
|
||||
firstWanted: 15,
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{
|
||||
name: "first block missing",
|
||||
pool: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{16: {"P2", true}, 18: {"P2", true}}),
|
||||
secondWanted: 16,
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{
|
||||
name: "both blocks present",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
|
||||
firstWanted: 10,
|
||||
secondWanted: 11,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers()
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
|
||||
if tt.firstWanted != 0 {
|
||||
peer := pool.blocks[tt.firstWanted]
|
||||
block := pool.peers[peer].blocks[tt.firstWanted]
|
||||
assert.Equal(t, block, gotFirst.block,
|
||||
"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
|
||||
tt.firstWanted, gotFirst.block.Height)
|
||||
}
|
||||
|
||||
if tt.secondWanted != 0 {
|
||||
peer := pool.blocks[tt.secondWanted]
|
||||
block := pool.peers[peer].blocks[tt.secondWanted]
|
||||
assert.Equal(t, block, gotSecond.block,
|
||||
"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
|
||||
tt.secondWanted, gotSecond.block.Height)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "both blocks missing",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
|
||||
},
|
||||
{
|
||||
name: "second block missing",
|
||||
pool: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{18: {"P2", true}}),
|
||||
},
|
||||
{
|
||||
name: "first block missing",
|
||||
pool: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{18: {"P1", true}, 16: {"P2", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{18: {"P1", true}}),
|
||||
},
|
||||
{
|
||||
name: "both blocks present",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{},
|
||||
map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessedCurrentHeightBlock(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "one peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 101, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{101: {"P1", true}}),
|
||||
},
|
||||
{
|
||||
name: "multiple peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
|
||||
101: {"P2", true}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 101,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
104: {"P1", true}, 105: {"P1", false},
|
||||
101: {"P2", true}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.ProcessedCurrentHeightBlock()
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePeerAtCurrentHeight(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "one peer, remove peer for block at H",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", false}, 101: {"P1", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "one peer, remove peer for block at H+1",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "multiple peers, remove peer for block at H",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", false}, 104: {"P1", true}, 105: {"P1", false},
|
||||
101: {"P2", true}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
101: {"P2", true}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
},
|
||||
{
|
||||
name: "multiple peers, remove peer for block at H+1",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
|
||||
101: {"P2", false}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
blockchain/v1/reactor.go (new file, 570 lines)
@@ -0,0 +1,570 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)

	trySyncIntervalMS = 10
	trySendIntervalMS = 10

	// ask for best height every 10s
	statusUpdateIntervalSeconds = 10
)

var (
	// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
	// have not been received.
	maxRequestsPerPeer = 20
	// Maximum number of block requests for the reactor, pending or for which blocks have been received.
	maxNumRequests = 64
)
|
||||
|
||||
type consensusReactor interface {
|
||||
// for when we switch from blockchain reactor and fast sync to
|
||||
// the consensus machine
|
||||
SwitchToConsensus(state sm.State, skipWAL bool)
|
||||
}
|
||||
|
||||
// BlockchainReactor handles long-term catchup syncing.
|
||||
type BlockchainReactor struct {
|
||||
p2p.BaseReactor
|
||||
|
||||
initialState sm.State // immutable
|
||||
state sm.State
|
||||
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
|
||||
fastSync bool
|
||||
stateSynced bool
|
||||
|
||||
fsm *BcReactorFSM
|
||||
blocksSynced uint64
|
||||
|
||||
// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
|
||||
messagesForFSMCh chan bcReactorMessage
|
||||
|
||||
// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
|
||||
// to this channel to be processed in the context of the poolRoutine.
|
||||
errorsForFSMCh chan bcReactorMessage
|
||||
|
||||
// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
|
||||
// the switch.
|
||||
eventsFromFSMCh chan bcFsmMessage
|
||||
|
||||
swReporter *behaviour.SwitchReporter
|
||||
}
|
||||
|
||||
// NewBlockchainReactor returns new reactor instance.
|
||||
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
|
||||
fastSync bool) *BlockchainReactor {
|
||||
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
store.Height()))
|
||||
}
|
||||
|
||||
const capacity = 1000
|
||||
eventsFromFSMCh := make(chan bcFsmMessage, capacity)
|
||||
messagesForFSMCh := make(chan bcReactorMessage, capacity)
|
||||
errorsForFSMCh := make(chan bcReactorMessage, capacity)
|
||||
|
||||
startHeight := store.Height() + 1
|
||||
if startHeight == 1 {
|
||||
startHeight = state.InitialHeight
|
||||
}
|
||||
bcR := &BlockchainReactor{
|
||||
initialState: state,
|
||||
state: state,
|
||||
blockExec: blockExec,
|
||||
fastSync: fastSync,
|
||||
store: store,
|
||||
messagesForFSMCh: messagesForFSMCh,
|
||||
eventsFromFSMCh: eventsFromFSMCh,
|
||||
errorsForFSMCh: errorsForFSMCh,
|
||||
}
|
||||
fsm := NewFSM(startHeight, bcR)
|
||||
bcR.fsm = fsm
|
||||
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
|
||||
// bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
|
||||
|
||||
return bcR
|
||||
}
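For orientation, a minimal wiring sketch of how a node could construct the reactor defined above; only `NewBlockchainReactor` and `SetLogger` come from this file, while `state`, `blockExec`, `blockStore`, `logger`, and `sw` are assumed to exist in the node's setup code:

// Hypothetical usage sketch, not part of this diff.
bcReactor := NewBlockchainReactor(state, blockExec, blockStore, true) // fastSync enabled
bcReactor.SetLogger(logger.With("module", "blockchain"))
sw.AddReactor("BLOCKCHAIN", bcReactor) // the switch later calls OnStart, which launches poolRoutine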
|
||||
|
||||
// bcReactorMessage is used by the reactor to send messages to the FSM.
|
||||
type bcReactorMessage struct {
|
||||
event bReactorEvent
|
||||
data bReactorEventData
|
||||
}
|
||||
|
||||
type bFsmEvent uint
|
||||
|
||||
const (
|
||||
// message type events
|
||||
peerErrorEv = iota + 1
|
||||
syncFinishedEv
|
||||
)
|
||||
|
||||
type bFsmEventData struct {
|
||||
peerID p2p.ID
|
||||
err error
|
||||
}
|
||||
|
||||
// bcFsmMessage is used by the FSM to send messages to the reactor
|
||||
type bcFsmMessage struct {
|
||||
event bFsmEvent
|
||||
data bFsmEventData
|
||||
}
|
||||
|
||||
// SetLogger implements service.Service by setting the logger on reactor and pool.
|
||||
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
|
||||
bcR.BaseService.Logger = l
|
||||
bcR.fsm.SetLogger(l)
|
||||
}
|
||||
|
||||
// OnStart implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStart() error {
|
||||
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
|
||||
if bcR.fastSync {
|
||||
go bcR.poolRoutine()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStop() {
|
||||
_ = bcR.Stop()
|
||||
}
|
||||
|
||||
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
|
||||
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
|
||||
bcR.fastSync = true
|
||||
bcR.initialState = state
|
||||
bcR.state = state
|
||||
bcR.stateSynced = true
|
||||
|
||||
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
|
||||
bcR.fsm.SetLogger(bcR.Logger)
|
||||
go bcR.poolRoutine()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetChannels implements Reactor
|
||||
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: BlockchainChannel,
|
||||
Priority: 10,
|
||||
SendQueueCapacity: 2000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
MessageType: &bcproto.Message{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
p2p.SendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
// bcStatusResponseMessage from the peer and call pool.updatePeer()
|
||||
}
|
||||
|
||||
// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist, a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should be requesting a block that doesn't exist.
|
||||
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
|
||||
src p2p.Peer) (queued bool) {
|
||||
|
||||
block := bcR.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
pbbi, err := block.ToProto()
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Could not send block message to peer", "err", err)
|
||||
return false
|
||||
}
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockResponse{Block: pbbi},
|
||||
}, bcR.Logger)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
}, bcR.Logger)
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
|
||||
return p2p.TrySendEnvelopeShim(src, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
},
|
||||
}, bcR.Logger)
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor by removing peer from the pool.
|
||||
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
msgData := bcReactorMessage{
|
||||
event: peerRemoveEv,
|
||||
data: bReactorEventData{
|
||||
peerID: peer.ID(),
|
||||
err: errSwitchRemovesPeer,
|
||||
},
|
||||
}
|
||||
bcR.errorsForFSMCh <- msgData
|
||||
}
|
||||
|
||||
// ReceiveEnvelope implements Reactor by handling the message types listed in the switch below.
|
||||
func (bcR *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
|
||||
if err := bc.ValidateMsg(e.Message); err != nil {
|
||||
bcR.Logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
bcR.Logger.Debug("Receive", "src", e.Src, "chID", e.ChannelID, "msg", e.Message)
|
||||
|
||||
switch msg := e.Message.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
if queued := bcR.sendBlockToPeer(msg, e.Src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
bcR.Logger.Error("Could not send block message to peer", "src", e.Src, "height", msg.Height)
|
||||
}
|
||||
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
if queued := bcR.sendStatusResponseToPeer(msg, e.Src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
bcR.Logger.Error("Could not send status message to peer", "src", e.Src)
|
||||
}
|
||||
|
||||
case *bcproto.BlockResponse:
|
||||
bi, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("error transition block from protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: blockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: e.Src.ID(),
|
||||
height: bi.Height,
|
||||
block: bi,
|
||||
length: msg.Size(),
|
||||
},
|
||||
}
|
||||
bcR.Logger.Info("Received", "src", e.Src, "height", bi.Height)
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
case *bcproto.NoBlockResponse:
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: noBlockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: e.Src.ID(),
|
||||
height: msg.Height,
|
||||
},
|
||||
}
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", e.Src, "height", msg.Height)
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
|
||||
case *bcproto.StatusResponse:
|
||||
// Got a peer status. Unverified.
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: statusResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: e.Src.ID(),
|
||||
height: msg.Height,
|
||||
length: msg.Size(),
|
||||
},
|
||||
}
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
|
||||
default:
|
||||
bcR.Logger.Error(fmt.Sprintf("unknown message type %T", msg))
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
|
||||
msg := &bcproto.Message{}
|
||||
err := proto.Unmarshal(msgBytes, msg)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
uw, err := msg.Unwrap()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bcR.ReceiveEnvelope(p2p.Envelope{
|
||||
ChannelID: chID,
|
||||
Src: peer,
|
||||
Message: uw,
|
||||
})
|
||||
}
|
||||
|
||||
// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel
|
||||
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
|
||||
|
||||
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
doProcessBlockCh := make(chan struct{}, 1)
|
||||
|
||||
lastHundred := time.Now()
|
||||
lastRate := 0.0
|
||||
|
||||
ForLoop:
|
||||
for {
|
||||
select {
|
||||
case <-stopProcessing:
|
||||
bcR.Logger.Info("finishing block execution")
|
||||
break ForLoop
|
||||
case <-processReceivedBlockTicker.C: // try to execute blocks
|
||||
select {
|
||||
case doProcessBlockCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
case <-doProcessBlockCh:
|
||||
for {
|
||||
err := bcR.processBlock()
|
||||
if err == errMissingBlock {
|
||||
break
|
||||
}
|
||||
// Notify FSM of block processing result.
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: processedBlockEv,
|
||||
data: bReactorEventData{
|
||||
err: err,
|
||||
},
|
||||
}
|
||||
_ = bcR.fsm.Handle(&msgForFSM)
|
||||
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
bcR.blocksSynced++
|
||||
if bcR.blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
height, maxPeerHeight := bcR.fsm.Status()
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", height,
|
||||
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
|
||||
func (bcR *BlockchainReactor) poolRoutine() {
|
||||
|
||||
bcR.fsm.Start()
|
||||
|
||||
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
|
||||
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
|
||||
|
||||
stopProcessing := make(chan struct{}, 1)
|
||||
go bcR.processBlocksRoutine(stopProcessing)
|
||||
|
||||
ForLoop:
|
||||
for {
|
||||
select {
|
||||
|
||||
case <-sendBlockRequestTicker.C:
|
||||
if !bcR.fsm.NeedsBlocks() {
|
||||
continue
|
||||
}
|
||||
_ = bcR.fsm.Handle(&bcReactorMessage{
|
||||
event: makeRequestsEv,
|
||||
data: bReactorEventData{
|
||||
maxNumRequests: maxNumRequests}})
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// Ask for status updates.
|
||||
go bcR.sendStatusRequest()
|
||||
|
||||
case msg := <-bcR.messagesForFSMCh:
|
||||
// Sent from the Receive() routine when status (statusResponseEv) and
|
||||
// block (blockResponseEv) response events are received
|
||||
_ = bcR.fsm.Handle(&msg)
|
||||
|
||||
case msg := <-bcR.errorsForFSMCh:
|
||||
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
|
||||
// FSM state timer expiry routine (stateTimeoutEv).
|
||||
_ = bcR.fsm.Handle(&msg)
|
||||
|
||||
case msg := <-bcR.eventsFromFSMCh:
|
||||
switch msg.event {
|
||||
case syncFinishedEv:
|
||||
stopProcessing <- struct{}{}
|
||||
// Sent from the FSM when it enters finished state.
|
||||
break ForLoop
|
||||
case peerErrorEv:
|
||||
// Sent from the FSM when it detects peer error
|
||||
bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
|
||||
if msg.data.err == errNoPeerResponse {
|
||||
// Sent from the peer timeout handler routine
|
||||
_ = bcR.fsm.Handle(&bcReactorMessage{
|
||||
event: peerRemoveEv,
|
||||
data: bReactorEventData{
|
||||
peerID: msg.data.peerID,
|
||||
err: msg.data.err,
|
||||
},
|
||||
})
|
||||
}
|
||||
// else {
|
||||
// For slow peers, or errors due to blocks received from wrong peer
|
||||
// the FSM had already removed the peers
|
||||
// }
|
||||
default:
|
||||
bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
|
||||
}
|
||||
|
||||
case <-bcR.Quit():
|
||||
break ForLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
if peer != nil {
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) processBlock() error {
|
||||
|
||||
first, second, err := bcR.fsm.FirstTwoBlocks()
|
||||
if err != nil {
|
||||
// We need both to sync the first block.
|
||||
return err
|
||||
}
|
||||
|
||||
chainID := bcR.initialState.ChainID
|
||||
|
||||
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstPartSetHeader := firstParts.Header()
|
||||
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("error during commit verification", "err", err,
|
||||
"first", first.Height, "second", second.Height)
|
||||
return errBlockVerificationFailure
|
||||
}
|
||||
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
// sendStatusRequest broadcasts a `StatusRequest` to all peers, asking for their `BlockStore` base and height.
|
||||
func (bcR *BlockchainReactor) sendStatusRequest() {
|
||||
bcR.Switch.BroadcastEnvelope(p2p.Envelope{
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
})
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
// sendBlockRequest sends a `BlockRequest` for the given height to the peer.
|
||||
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
if peer == nil {
|
||||
return errNilPeerForBlockRequest
|
||||
}
|
||||
|
||||
queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
|
||||
ChannelID: BlockchainChannel,
|
||||
Message: &bcproto.BlockRequest{Height: height},
|
||||
}, bcR.Logger)
|
||||
if !queued {
|
||||
return errSendQueueFull
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
func (bcR *BlockchainReactor) switchToConsensus() {
|
||||
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
|
||||
if ok {
|
||||
conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced)
|
||||
bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
|
||||
}
|
||||
// else {
|
||||
// Should only happen during testing.
|
||||
// }
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
// Called by FSM and pool:
|
||||
// - pool calls when it detects a slow peer or when a peer times out
|
||||
// - FSM calls when:
|
||||
// - adding a block (addBlock) fails
|
||||
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
|
||||
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
|
||||
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
|
||||
msgData := bcFsmMessage{
|
||||
event: peerErrorEv,
|
||||
data: bFsmEventData{
|
||||
peerID: peerID,
|
||||
err: err,
|
||||
},
|
||||
}
|
||||
bcR.eventsFromFSMCh <- msgData
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
|
||||
if timer == nil {
|
||||
panic("nil timer pointer parameter")
|
||||
}
|
||||
if *timer == nil {
|
||||
*timer = time.AfterFunc(timeout, func() {
|
||||
msg := bcReactorMessage{
|
||||
event: stateTimeoutEv,
|
||||
data: bReactorEventData{
|
||||
stateName: name,
|
||||
},
|
||||
}
|
||||
bcR.errorsForFSMCh <- msg
|
||||
})
|
||||
} else {
|
||||
(*timer).Reset(timeout)
|
||||
}
|
||||
}
|
||||
blockchain/v1/reactor_fsm.go (new file, 462 lines)
@@ -0,0 +1,462 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Blockchain Reactor State
|
||||
type bcReactorFSMState struct {
|
||||
name string
|
||||
|
||||
// called when transitioning out of current state
|
||||
handle func(*BcReactorFSM, bReactorEvent, bReactorEventData) (next *bcReactorFSMState, err error)
|
||||
// called when entering the state
|
||||
enter func(fsm *BcReactorFSM)
|
||||
|
||||
// timeout to ensure FSM is not stuck in a state forever
|
||||
// the timer is owned and run by the fsm instance
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func (s *bcReactorFSMState) String() string {
|
||||
return s.name
|
||||
}
|
||||
|
||||
// BcReactorFSM is the datastructure for the Blockchain Reactor State Machine
|
||||
type BcReactorFSM struct {
|
||||
logger log.Logger
|
||||
mtx sync.Mutex
|
||||
|
||||
startTime time.Time
|
||||
|
||||
state *bcReactorFSMState
|
||||
stateTimer *time.Timer
|
||||
pool *BlockPool
|
||||
|
||||
// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, reporting errors, etc.
|
||||
toBcR bcReactor
|
||||
}
|
||||
|
||||
// NewFSM creates a new reactor FSM.
|
||||
func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM {
|
||||
return &BcReactorFSM{
|
||||
state: unknown,
|
||||
startTime: time.Now(),
|
||||
pool: NewBlockPool(height, toBcR),
|
||||
toBcR: toBcR,
|
||||
}
|
||||
}
|
||||
|
||||
// bReactorEventData is part of the message sent by the reactor to the FSM and used by the state handlers.
|
||||
type bReactorEventData struct {
|
||||
peerID p2p.ID
|
||||
err error // for peer error: timeout, slow; for processed block event if error occurred
|
||||
base int64 // for status response
|
||||
height int64 // for status response; for processed block event
|
||||
block *types.Block // for block response
|
||||
stateName string // for state timeout events
|
||||
length int // for block response event, length of received block, used to detect slow peers
|
||||
maxNumRequests int // for request needed event, maximum number of pending requests
|
||||
}
|
||||
|
||||
// Blockchain Reactor Events (the input to the state machine)
|
||||
type bReactorEvent uint
|
||||
|
||||
const (
|
||||
// message type events
|
||||
startFSMEv = iota + 1
|
||||
statusResponseEv
|
||||
blockResponseEv
|
||||
noBlockResponseEv
|
||||
processedBlockEv
|
||||
makeRequestsEv
|
||||
stopFSMEv
|
||||
|
||||
// other events
|
||||
peerRemoveEv = iota + 256
|
||||
stateTimeoutEv
|
||||
)
|
||||
|
||||
func (msg *bcReactorMessage) String() string {
|
||||
var dataStr string
|
||||
|
||||
switch msg.event {
|
||||
case startFSMEv:
|
||||
dataStr = ""
|
||||
case statusResponseEv:
|
||||
dataStr = fmt.Sprintf("peer=%v base=%v height=%v", msg.data.peerID, msg.data.base, msg.data.height)
|
||||
case blockResponseEv:
|
||||
dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v",
|
||||
msg.data.peerID, msg.data.block.Height, msg.data.length)
|
||||
case noBlockResponseEv:
|
||||
dataStr = fmt.Sprintf("peer=%v requested height=%v",
|
||||
msg.data.peerID, msg.data.height)
|
||||
case processedBlockEv:
|
||||
dataStr = fmt.Sprintf("error=%v", msg.data.err)
|
||||
case makeRequestsEv:
|
||||
dataStr = ""
|
||||
case stopFSMEv:
|
||||
dataStr = ""
|
||||
case peerRemoveEv:
|
||||
dataStr = fmt.Sprintf("peer: %v is being removed by the switch", msg.data.peerID)
|
||||
case stateTimeoutEv:
|
||||
dataStr = fmt.Sprintf("state=%v", msg.data.stateName)
|
||||
default:
|
||||
dataStr = "cannot interpret message data"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v: %v", msg.event, dataStr)
|
||||
}
|
||||
|
||||
func (ev bReactorEvent) String() string {
|
||||
switch ev {
|
||||
case startFSMEv:
|
||||
return "startFSMEv"
|
||||
case statusResponseEv:
|
||||
return "statusResponseEv"
|
||||
case blockResponseEv:
|
||||
return "blockResponseEv"
|
||||
case noBlockResponseEv:
|
||||
return "noBlockResponseEv"
|
||||
case processedBlockEv:
|
||||
return "processedBlockEv"
|
||||
case makeRequestsEv:
|
||||
return "makeRequestsEv"
|
||||
case stopFSMEv:
|
||||
return "stopFSMEv"
|
||||
case peerRemoveEv:
|
||||
return "peerRemoveEv"
|
||||
case stateTimeoutEv:
|
||||
return "stateTimeoutEv"
|
||||
default:
|
||||
return "event unknown"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// states
|
||||
var (
|
||||
unknown *bcReactorFSMState
|
||||
waitForPeer *bcReactorFSMState
|
||||
waitForBlock *bcReactorFSMState
|
||||
finished *bcReactorFSMState
|
||||
)
|
||||
|
||||
// timeouts for state timers
|
||||
const (
|
||||
waitForPeerTimeout = 3 * time.Second
|
||||
waitForBlockAtCurrentHeightTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
// errors
|
||||
var (
|
||||
// internal to the package
|
||||
errNoErrorFinished = errors.New("fast sync is finished")
|
||||
errInvalidEvent = errors.New("invalid event in current state")
|
||||
errMissingBlock = errors.New("missing blocks")
|
||||
errNilPeerForBlockRequest = errors.New("peer for block request does not exist in the switch")
|
||||
errSendQueueFull = errors.New("block request not made, send-queue is full")
|
||||
errPeerTooShort = errors.New("peer height too low, old peer removed / new peer not added")
|
||||
errSwitchRemovesPeer = errors.New("switch is removing peer")
|
||||
errTimeoutEventWrongState = errors.New("timeout event for a state different than the current one")
|
||||
errNoTallerPeer = errors.New("fast sync timed out on waiting for a peer taller than this node")
|
||||
|
||||
// reported eventually to the switch
|
||||
// handle return
|
||||
errPeerLowersItsHeight = errors.New("fast sync peer reports a height lower than previous")
|
||||
// handle return
|
||||
errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights")
|
||||
errNoPeerResponse = errors.New("fast sync timed out on peer block response") // xx
|
||||
errBadDataFromPeer = errors.New("fast sync received block from wrong peer or block is bad") // xx
|
||||
errDuplicateBlock = errors.New("fast sync received duplicate block from peer")
|
||||
errBlockVerificationFailure = errors.New("fast sync block verification failure") // xx
|
||||
errSlowPeer = errors.New("fast sync peer is not sending us data fast enough") // xx
|
||||
|
||||
)
|
||||
|
||||
func init() {
|
||||
unknown = &bcReactorFSMState{
|
||||
name: "unknown",
|
||||
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
|
||||
switch ev {
|
||||
case startFSMEv:
|
||||
// Broadcast Status message. Currently doesn't return non-nil error.
|
||||
fsm.toBcR.sendStatusRequest()
|
||||
return waitForPeer, nil
|
||||
|
||||
case stopFSMEv:
|
||||
return finished, errNoErrorFinished
|
||||
|
||||
default:
|
||||
return unknown, errInvalidEvent
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
waitForPeer = &bcReactorFSMState{
|
||||
name: "waitForPeer",
|
||||
timeout: waitForPeerTimeout,
|
||||
enter: func(fsm *BcReactorFSM) {
|
||||
// Stop when leaving the state.
|
||||
fsm.resetStateTimer()
|
||||
},
|
||||
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
|
||||
switch ev {
|
||||
case stateTimeoutEv:
|
||||
if data.stateName != "waitForPeer" {
|
||||
fsm.logger.Error("received a state timeout event for different state",
|
||||
"state", data.stateName)
|
||||
return waitForPeer, errTimeoutEventWrongState
|
||||
}
|
||||
// There was no statusResponse received from any peer.
|
||||
// Should we send status request again?
|
||||
return finished, errNoTallerPeer
|
||||
|
||||
case statusResponseEv:
|
||||
if err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height); err != nil {
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, err
|
||||
}
|
||||
}
|
||||
if fsm.stateTimer != nil {
|
||||
fsm.stateTimer.Stop()
|
||||
}
|
||||
return waitForBlock, nil
|
||||
|
||||
case stopFSMEv:
|
||||
if fsm.stateTimer != nil {
|
||||
fsm.stateTimer.Stop()
|
||||
}
|
||||
return finished, errNoErrorFinished
|
||||
|
||||
default:
|
||||
return waitForPeer, errInvalidEvent
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
waitForBlock = &bcReactorFSMState{
|
||||
name: "waitForBlock",
|
||||
timeout: waitForBlockAtCurrentHeightTimeout,
|
||||
enter: func(fsm *BcReactorFSM) {
|
||||
// Stop when leaving the state.
|
||||
fsm.resetStateTimer()
|
||||
},
|
||||
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
|
||||
switch ev {
|
||||
|
||||
case statusResponseEv:
|
||||
err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height)
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, err
|
||||
}
|
||||
if fsm.pool.ReachedMaxHeight() {
|
||||
return finished, err
|
||||
}
|
||||
return waitForBlock, err
|
||||
|
||||
case blockResponseEv:
|
||||
fsm.logger.Debug("blockResponseEv", "H", data.block.Height)
|
||||
err := fsm.pool.AddBlock(data.peerID, data.block, data.length)
|
||||
if err != nil {
|
||||
// A block was received that was unsolicited, from an unexpected peer, or that we already have.
|
||||
// Ignore block, remove peer and send error to switch.
|
||||
fsm.pool.RemovePeer(data.peerID, err)
|
||||
fsm.toBcR.sendPeerError(err, data.peerID)
|
||||
}
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, err
|
||||
}
|
||||
return waitForBlock, err
|
||||
case noBlockResponseEv:
|
||||
fsm.logger.Error("peer does not have requested block", "peer", data.peerID)
|
||||
|
||||
return waitForBlock, nil
|
||||
case processedBlockEv:
|
||||
if data.err != nil {
|
||||
first, second, _ := fsm.pool.FirstTwoBlocksAndPeers()
|
||||
fsm.logger.Error("error processing block", "err", data.err,
|
||||
"first", first.block.Height, "second", second.block.Height)
|
||||
fsm.logger.Error("send peer error for", "peer", first.peer.ID)
|
||||
fsm.toBcR.sendPeerError(data.err, first.peer.ID)
|
||||
fsm.logger.Error("send peer error for", "peer", second.peer.ID)
|
||||
fsm.toBcR.sendPeerError(data.err, second.peer.ID)
|
||||
// Remove the first two blocks. This will also remove the peers
|
||||
fsm.pool.InvalidateFirstTwoBlocks(data.err)
|
||||
} else {
|
||||
fsm.pool.ProcessedCurrentHeightBlock()
|
||||
// Since we advanced one block, reset the state timer.
|
||||
fsm.resetStateTimer()
|
||||
}
|
||||
|
||||
// Both cases above may result in achieving maximum height.
|
||||
if fsm.pool.ReachedMaxHeight() {
|
||||
return finished, nil
|
||||
}
|
||||
|
||||
return waitForBlock, data.err
|
||||
|
||||
case peerRemoveEv:
|
||||
// This event is sent by the switch to remove disconnected and errored peers.
|
||||
fsm.pool.RemovePeer(data.peerID, data.err)
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, nil
|
||||
}
|
||||
if fsm.pool.ReachedMaxHeight() {
|
||||
return finished, nil
|
||||
}
|
||||
return waitForBlock, nil
|
||||
|
||||
case makeRequestsEv:
|
||||
fsm.makeNextRequests(data.maxNumRequests)
|
||||
return waitForBlock, nil
|
||||
|
||||
case stateTimeoutEv:
|
||||
if data.stateName != "waitForBlock" {
|
||||
fsm.logger.Error("received a state timeout event for different state",
|
||||
"state", data.stateName)
|
||||
return waitForBlock, errTimeoutEventWrongState
|
||||
}
|
||||
// We haven't received the block at current height or height+1. Remove peer.
|
||||
fsm.pool.RemovePeerAtCurrentHeights(errNoPeerResponseForCurrentHeights)
|
||||
fsm.resetStateTimer()
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, errNoPeerResponseForCurrentHeights
|
||||
}
|
||||
if fsm.pool.ReachedMaxHeight() {
|
||||
return finished, nil
|
||||
}
|
||||
return waitForBlock, errNoPeerResponseForCurrentHeights
|
||||
|
||||
case stopFSMEv:
|
||||
if fsm.stateTimer != nil {
|
||||
fsm.stateTimer.Stop()
|
||||
}
|
||||
return finished, errNoErrorFinished
|
||||
|
||||
default:
|
||||
return waitForBlock, errInvalidEvent
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
finished = &bcReactorFSMState{
|
||||
name: "finished",
|
||||
enter: func(fsm *BcReactorFSM) {
|
||||
fsm.logger.Info("Time to switch to consensus reactor!", "height", fsm.pool.Height)
|
||||
fsm.toBcR.switchToConsensus()
|
||||
fsm.cleanup()
|
||||
},
|
||||
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
|
||||
return finished, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// bcReactor is the interface used by the FSM for sending Block and Status requests,
// reporting peer errors, and resetting state timers.
// Implemented by BlockchainReactor and by the tests.
|
||||
type bcReactor interface {
|
||||
sendStatusRequest()
|
||||
sendBlockRequest(peerID p2p.ID, height int64) error
|
||||
sendPeerError(err error, peerID p2p.ID)
|
||||
resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
|
||||
switchToConsensus()
|
||||
}
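The reactor above and the test doubles (`testBcR` in pool_test.go, `testReactor` in reactor_fsm_test.go) implement this interface. A minimal no-op implementation, shown only to illustrate the surface the FSM depends on (the type name is hypothetical, not part of the diff):

// nopBcR is a hypothetical no-op bcReactor used purely for illustration.
type nopBcR struct{}

func (nopBcR) sendStatusRequest()                                                      {}
func (nopBcR) sendBlockRequest(peerID p2p.ID, height int64) error                      { return nil }
func (nopBcR) sendPeerError(err error, peerID p2p.ID)                                  {}
func (nopBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {}
func (nopBcR) switchToConsensus()                                                      {}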
|
||||
|
||||
// SetLogger sets the FSM logger.
|
||||
func (fsm *BcReactorFSM) SetLogger(l log.Logger) {
|
||||
fsm.logger = l
|
||||
fsm.pool.SetLogger(l)
|
||||
}
|
||||
|
||||
// Start starts the FSM.
|
||||
func (fsm *BcReactorFSM) Start() {
|
||||
_ = fsm.Handle(&bcReactorMessage{event: startFSMEv})
|
||||
}
|
||||
|
||||
// Handle processes messages and events sent to the FSM.
|
||||
func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error {
|
||||
fsm.mtx.Lock()
|
||||
defer fsm.mtx.Unlock()
|
||||
fsm.logger.Debug("FSM received", "event", msg, "state", fsm.state)
|
||||
|
||||
if fsm.state == nil {
|
||||
fsm.state = unknown
|
||||
}
|
||||
next, err := fsm.state.handle(fsm, msg.event, msg.data)
|
||||
if err != nil {
|
||||
fsm.logger.Error("FSM event handler returned", "err", err,
|
||||
"state", fsm.state, "event", msg.event)
|
||||
}
|
||||
|
||||
oldState := fsm.state.name
|
||||
fsm.transition(next)
|
||||
if oldState != fsm.state.name {
|
||||
fsm.logger.Info("FSM changed state", "new_state", fsm.state)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (fsm *BcReactorFSM) transition(next *bcReactorFSMState) {
|
||||
if next == nil {
|
||||
return
|
||||
}
|
||||
if fsm.state != next {
|
||||
fsm.state = next
|
||||
if next.enter != nil {
|
||||
next.enter(fsm)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Called when entering an FSM state in order to detect lack of progress in the state machine.
|
||||
// Note the use of the 'bcr' interface to facilitate testing without the timer expiring.
|
||||
func (fsm *BcReactorFSM) resetStateTimer() {
|
||||
fsm.toBcR.resetStateTimer(fsm.state.name, &fsm.stateTimer, fsm.state.timeout)
|
||||
}
|
||||
|
||||
func (fsm *BcReactorFSM) isCaughtUp() bool {
|
||||
return fsm.state == finished
|
||||
}
|
||||
|
||||
func (fsm *BcReactorFSM) makeNextRequests(maxNumRequests int) {
|
||||
fsm.pool.MakeNextRequests(maxNumRequests)
|
||||
}
|
||||
|
||||
func (fsm *BcReactorFSM) cleanup() {
|
||||
fsm.pool.Cleanup()
|
||||
}
|
||||
|
||||
// NeedsBlocks checks if more block requests are required.
|
||||
func (fsm *BcReactorFSM) NeedsBlocks() bool {
|
||||
fsm.mtx.Lock()
|
||||
defer fsm.mtx.Unlock()
|
||||
return fsm.state.name == "waitForBlock" && fsm.pool.NeedsBlocks()
|
||||
}
|
||||
|
||||
// FirstTwoBlocks returns the two blocks at pool height and height+1
|
||||
func (fsm *BcReactorFSM) FirstTwoBlocks() (first, second *types.Block, err error) {
|
||||
fsm.mtx.Lock()
|
||||
defer fsm.mtx.Unlock()
|
||||
firstBP, secondBP, err := fsm.pool.FirstTwoBlocksAndPeers()
|
||||
if err == nil {
|
||||
first = firstBP.block
|
||||
second = secondBP.block
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Status returns the pool's height and the maximum peer height.
|
||||
func (fsm *BcReactorFSM) Status() (height, maxPeerHeight int64) {
|
||||
fsm.mtx.Lock()
|
||||
defer fsm.mtx.Unlock()
|
||||
return fsm.pool.Height, fsm.pool.MaxPeerHeight
|
||||
}
|
||||
blockchain/v1/reactor_fsm_test.go (new file, 944 lines)
@@ -0,0 +1,944 @@
|
||||
package v1

import (
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"

    "github.com/tendermint/tendermint/libs/log"
    tmmath "github.com/tendermint/tendermint/libs/math"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

type lastBlockRequestT struct {
    peerID p2p.ID
    height int64
}

type lastPeerErrorT struct {
    peerID p2p.ID
    err    error
}

// reactor for FSM testing
type testReactor struct {
    logger            log.Logger
    fsm               *BcReactorFSM
    numStatusRequests int
    numBlockRequests  int
    lastBlockRequest  lastBlockRequestT
    lastPeerError     lastPeerErrorT
    stateTimerStarts  map[string]int
}

func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error {
    return fsm.Handle(&bcReactorMessage{event: ev, data: data})
}

type fsmStepTestValues struct {
    currentState string
    event        bReactorEvent
    data         bReactorEventData

    wantErr           error
    wantState         string
    wantStatusReqSent bool
    wantReqIncreased  bool
    wantNewBlocks     []int64
    wantRemovedPeers  []p2p.ID
}

// ---------------------------------------------------------------------------
// helper test functions for the different FSM events, states and expected behavior
func sStopFSMEv(current, expected string) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        stopFSMEv,
        wantState:    expected,
        wantErr:      errNoErrorFinished}
}

func sUnknownFSMEv(current string) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        1234,
        wantState:    current,
        wantErr:      errInvalidEvent}
}

func sStartFSMEv() fsmStepTestValues {
    return fsmStepTestValues{
        currentState:      "unknown",
        event:             startFSMEv,
        wantState:         "waitForPeer",
        wantStatusReqSent: true}
}

func sStateTimeoutEv(current, expected, timedoutState string, wantErr error) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        stateTimeoutEv,
        data: bReactorEventData{
            stateName: timedoutState,
        },
        wantState: expected,
        wantErr:   wantErr,
    }
}

func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        processedBlockEv,
        data: bReactorEventData{
            err: reactorError,
        },
        wantState: expected,
        wantErr:   reactorError,
    }
}

func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        statusResponseEv,
        data:         bReactorEventData{peerID: peerID, height: height},
        wantState:    expected,
        wantErr:      err}
}

func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues {
    return fsmStepTestValues{
        currentState:     current,
        event:            makeRequestsEv,
        data:             bReactorEventData{maxNumRequests: maxPendingRequests},
        wantState:        expected,
        wantReqIncreased: true,
    }
}

func sMakeRequestsEvErrored(current, expected string,
    maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues {
    return fsmStepTestValues{
        currentState:     current,
        event:            makeRequestsEv,
        data:             bReactorEventData{maxNumRequests: maxPendingRequests},
        wantState:        expected,
        wantErr:          err,
        wantRemovedPeers: peersRemoved,
        wantReqIncreased: true,
    }
}

func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues {
    txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
    return fsmStepTestValues{
        currentState: current,
        event:        blockResponseEv,
        data: bReactorEventData{
            peerID: peerID,
            height: height,
            block:  types.MakeBlock(height, txs, nil, nil),
            length: 100},
        wantState:     expected,
        wantNewBlocks: append(prevBlocks, height),
    }
}

func sBlockRespEvErrored(current, expected string,
    peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues {
    txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

    return fsmStepTestValues{
        currentState: current,
        event:        blockResponseEv,
        data: bReactorEventData{
            peerID: peerID,
            height: height,
            block:  types.MakeBlock(height, txs, nil, nil),
            length: 100},
        wantState:        expected,
        wantErr:          wantErr,
        wantRemovedPeers: peersRemoved,
        wantNewBlocks:    prevBlocks,
    }
}

func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues {
    return fsmStepTestValues{
        currentState: current,
        event:        peerRemoveEv,
        data: bReactorEventData{
            peerID: peerID,
            err:    err,
        },
        wantState:        expected,
        wantRemovedPeers: peersRemoved,
    }
}
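
// The helpers above compose into step tables. For example, the single-peer
// happy path exercised by TestFSMBasic below is just:
//
//    steps := []fsmStepTestValues{
//        sStartFSMEv(),
//        sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
//        sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
//        sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
//        sProcessedBlockEv("waitForBlock", "finished", nil),
//    }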

// --------------------------------------------

func newTestReactor(height int64) *testReactor {
    testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)}
    testBcR.fsm = NewFSM(height, testBcR)
    testBcR.fsm.SetLogger(testBcR.logger)
    return testBcR
}

func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) {
    // There is currently no good way to know to which peer a block request was sent.
    // So, in cases where it does not matter, we cheat before simulating a block
    // response and look up which peer the block is expected from.
    if step.event == blockResponseEv {
        height := step.data.height
        peerID, ok := testBcR.fsm.pool.blocks[height]
        if ok {
            step.data.peerID = peerID
        }
    }
}

type testFields struct {
    name               string
    startingHeight     int64
    maxRequestsPerPeer int
    maxPendingRequests int
    steps              []fsmStepTestValues
}

func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) {
    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            // Create test reactor
            testBcR := newTestReactor(tt.startingHeight)

            if tt.maxRequestsPerPeer != 0 {
                maxRequestsPerPeer = tt.maxRequestsPerPeer
            }

            for _, step := range tt.steps {
                step := step
                assert.Equal(t, step.currentState, testBcR.fsm.state.name)

                var heightBefore int64
                if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
                    heightBefore = testBcR.fsm.pool.Height
                }
                oldNumStatusRequests := testBcR.numStatusRequests
                oldNumBlockRequests := testBcR.numBlockRequests
                if matchRespToReq {
                    fixBlockResponseEvStep(&step, testBcR)
                }

                fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
                assert.Equal(t, step.wantErr, fsmErr)

                if step.wantStatusReqSent {
                    assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
                } else {
                    assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
                }

                if step.wantReqIncreased {
                    assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests)
                } else {
                    assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests)
                }

                for _, height := range step.wantNewBlocks {
                    _, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height)
                    assert.Nil(t, err)
                }
                if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
                    heightAfter := testBcR.fsm.pool.Height
                    assert.Equal(t, heightBefore, heightAfter)
                    firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
                    secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
                    assert.NotNil(t, err1)
                    assert.NotNil(t, err2)
                    assert.Nil(t, firstAfter)
                    assert.Nil(t, secondAfter)
                }

                assert.Equal(t, step.wantState, testBcR.fsm.state.name)

                if step.wantState == "finished" {
                    assert.True(t, testBcR.fsm.isCaughtUp())
                }
            }
        })
    }
}

func TestFSMBasic(t *testing.T) {
    tests := []testFields{
        {
            name:               "one block, one peer - TS2",
            startingHeight:     1,
            maxRequestsPerPeer: 2,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
                sProcessedBlockEv("waitForBlock", "finished", nil),
            },
        },
        {
            name:               "multi block, multi peer - TS2",
            startingHeight:     1,
            maxRequestsPerPeer: 2,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil),
                sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}),

                sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
                sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
                sProcessedBlockEv("waitForBlock", "finished", nil),
            },
        },
    }

    executeFSMTests(t, tests, true)
}

func TestFSMBlockVerificationFailure(t *testing.T) {
    tests := []testFields{
        {
            name:               "block verification failure - TS2 variant",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),

                // add P1 and get blocks 1-3 from it
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

                // add P2
                sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

                // block processing failure should remove P1 and all of its blocks
                sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure),

                // get blocks 1-3 from P2
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),

                // finish after processing blocks 1 and 2
                sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
                sProcessedBlockEv("waitForBlock", "finished", nil),
            },
        },
    }

    executeFSMTests(t, tests, false)
}

func TestFSMBadBlockFromPeer(t *testing.T) {
    tests := []testFields{
        {
            name:               "block we haven't asked for",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1 and ask for blocks 1-3
                sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

                // a blockResponseEv for height 100 should cause an error
                sBlockRespEvErrored("waitForBlock", "waitForPeer",
                    "P1", 100, []int64{}, errMissingBlock, []p2p.ID{}),
            },
        },
        {
            name:               "block we already have",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1 and get block 1
                sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sBlockRespEv("waitForBlock", "waitForBlock",
                    "P1", 1, []int64{}),

                // Get block 1 again. Since the peer is removed together with block 1,
                // the set of blocks present in the pool should be {}.
                sBlockRespEvErrored("waitForBlock", "waitForPeer",
                    "P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
            },
        },
        {
            name:               "block from unknown peer",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),

                // get block 1 from unknown peer P2
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sBlockRespEvErrored("waitForBlock", "waitForBlock",
                    "P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
            },
        },
        {
            name:               "block from wrong peer",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1, make requests for blocks 1-3 to P1
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

                // add P2
                sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

                // receive block 1 from P2
                sBlockRespEvErrored("waitForBlock", "waitForBlock",
                    "P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
            },
        },
    }

    executeFSMTests(t, tests, false)
}

func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
    tests := []testFields{
        {
            name:               "block at current height undelivered - TS5",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1, get blocks 1 and 2, process block 1
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sBlockRespEv("waitForBlock", "waitForBlock",
                    "P1", 1, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock",
                    "P1", 2, []int64{1}),
                sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

                // add P2
                sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

                // timeout on block 3; P1 should be removed
                sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),

                // make requests and finish by receiving blocks 2 and 3 from P2
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
                sProcessedBlockEv("waitForBlock", "finished", nil),
            },
        },
        {
            name:               "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1, request blocks 1-3 from P1
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

                // add P2 (tallest)
                sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

                // receive blocks 1-3 from P1
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

                // process blocks at heights 1 and 2
                sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
                sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

                // timeout on the block at height 4
                sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
            },
        },
    }

    executeFSMTests(t, tests, true)
}

func TestFSMPeerRelatedEvents(t *testing.T) {
    tests := []testFields{
        {
            name:           "peer remove event with no blocks",
            startingHeight: 1,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1, P2, P3
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
                sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),

                // switch removes P2
                sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
            },
        },
        {
            name:           "only peer removed while in waitForBlock state",
            startingHeight: 100,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1
                sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),

                // switch removes P1
                sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
            },
        },
        {
            name:               "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4",
            startingHeight:     100,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1 and make requests
                sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                // add P2
                sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

                // get blocks 100 and 101 from P1 and process the block at height 100
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
                sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

                // switch removes P2 (the tallest peer); the node is at the new
                // maxPeerHeight, so it should be finished
                sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
            },
        },
        {
            name:               "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
            startingHeight:     100,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1 and make requests
                sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

                // add P2
                sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),

                // get blocks 100 and 101 from P1
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
                sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),

                // process the block at height 100
                sProcessedBlockEv("waitForBlock", "waitForBlock", nil),

                // P2 becomes short
                sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
            },
        },
        {
            name:           "new short peer while in waitForPeer state",
            startingHeight: 100,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
            },
        },
        {
            name:           "new short peer while in waitForBlock state",
            startingHeight: 100,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
                sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
            },
        },
        {
            name:           "only peer updated with low height while in waitForBlock state",
            startingHeight: 100,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
                sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
            },
        },
        {
            name:               "peer does not exist in the switch",
            startingHeight:     9999999,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                // add P1
                sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),
                // send a request for block 9999999
                // Note: For this block request the "switch missing the peer" error is simulated;
                // see the implementation of the bcReactor interface, sendBlockRequest(), in this file.
                sMakeRequestsEvErrored("waitForBlock", "waitForBlock",
                    maxNumRequests, nil, []p2p.ID{"P1"}),
            },
        },
    }

    executeFSMTests(t, tests, true)
}

func TestFSMStopFSM(t *testing.T) {
    tests := []testFields{
        {
            name: "stopFSMEv in unknown",
            steps: []fsmStepTestValues{
                sStopFSMEv("unknown", "finished"),
            },
        },
        {
            name:           "stopFSMEv in waitForPeer",
            startingHeight: 1,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStopFSMEv("waitForPeer", "finished"),
            },
        },
        {
            name:           "stopFSMEv in waitForBlock",
            startingHeight: 1,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sStopFSMEv("waitForBlock", "finished"),
            },
        },
    }

    executeFSMTests(t, tests, false)
}

func TestFSMUnknownElements(t *testing.T) {
    tests := []testFields{
        {
            name: "unknown event for state unknown",
            steps: []fsmStepTestValues{
                sUnknownFSMEv("unknown"),
            },
        },
        {
            name: "unknown event for state waitForPeer",
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sUnknownFSMEv("waitForPeer"),
            },
        },
        {
            name:           "unknown event for state waitForBlock",
            startingHeight: 1,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sUnknownFSMEv("waitForBlock"),
            },
        },
    }

    executeFSMTests(t, tests, false)
}

func TestFSMPeerStateTimeoutEvent(t *testing.T) {
    tests := []testFields{
        {
            name:               "timeout event for state waitForPeer while in state waitForPeer - TS1",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer),
            },
        },
        {
            name:               "timeout event for state waitForPeer while in a state != waitForPeer",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState),
            },
        },
        {
            name:               "timeout event for state waitForBlock while in state waitForBlock",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights),
            },
        },
        {
            name:               "timeout event for state waitForBlock while in a state != waitForBlock",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState),
            },
        },
        {
            name:               "timeout event for state waitForBlock with multiple peers",
            startingHeight:     1,
            maxRequestsPerPeer: 3,
            steps: []fsmStepTestValues{
                sStartFSMEv(),
                sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
                sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
                sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
            },
        },
    }

    executeFSMTests(t, tests, false)
}

func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool,
    maxRequestsPerPeer int, maxPendingRequests int) testFields {

    // Generate numPeers peers with random heights (or all at numBlocks), according to
    // the randomPeerHeights flag.
    peerHeights := make([]int64, numPeers)
    for i := 0; i < numPeers; i++ {
        if i == 0 {
            peerHeights[0] = numBlocks
            continue
        }
        if randomPeerHeights {
            peerHeights[i] = int64(tmmath.MaxInt(tmrand.Intn(int(numBlocks)), int(startingHeight)+1))
        } else {
            peerHeights[i] = numBlocks
        }
    }

    // Approximate the slice capacity to save time on appends.
    testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))

    testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxNumRequests",
        numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)

    // Add the startFSMEv step.
    testSteps = append(testSteps, sStartFSMEv())

    // For each peer, add a statusResponseEv step.
    for i := 0; i < numPeers; i++ {
        peerName := fmt.Sprintf("P%d", i)
        if i == 0 {
            testSteps = append(
                testSteps,
                sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
        } else {
            testSteps = append(testSteps,
                sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
        }
    }

    height := startingHeight
    numBlocksReceived := 0
    prevBlocks := make([]int64, 0, maxPendingRequests)

forLoop:
    for i := 0; i < int(numBlocks); i++ {

        // Add the makeRequestEv step periodically.
        if i%maxRequestsPerPeer == 0 {
            testSteps = append(
                testSteps,
                sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
            )
        }

        // Add the blockRespEv step.
        testSteps = append(
            testSteps,
            sBlockRespEv("waitForBlock", "waitForBlock",
                "P0", height, prevBlocks))
        prevBlocks = append(prevBlocks, height)
        height++
        numBlocksReceived++

        // Add the processedBlockEv step periodically.
        if numBlocksReceived >= maxRequestsPerPeer || height >= numBlocks {
            for j := int(height) - numBlocksReceived; j < int(height); j++ {
                if j >= int(numBlocks) {
                    // This is the last block to be processed; we should be in the "finished" state.
                    testSteps = append(
                        testSteps,
                        sProcessedBlockEv("waitForBlock", "finished", nil))
                    break forLoop
                }
                testSteps = append(
                    testSteps,
                    sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
            }
            numBlocksReceived = 0
            prevBlocks = make([]int64, 0, maxPendingRequests)
        }
    }

    return testFields{
        name:               testName,
        startingHeight:     startingHeight,
        maxRequestsPerPeer: maxRequestsPerPeer,
        maxPendingRequests: maxPendingRequests,
        steps:              testSteps,
    }
}

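// Example: the fixed-parameter sequence used by TestFSMCorrectTransitionSequences
// below syncs 100 blocks from height 1 across 10 random-height peers, with 10
// requests per peer and 40 total pending requests:
//
//    tt := makeCorrectTransitionSequence(1, 100, 10, true, 10, 40)
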
const (
    maxStartingHeightTest       = 100
    maxRequestsPerPeerTest      = 20
    maxTotalPendingRequestsTest = 600
    maxNumPeersTest             = 1000
    maxNumBlocksInChainTest     = 10000 // should be smaller than 9999999
)

func makeCorrectTransitionSequenceWithRandomParameters() testFields {
    // Generate a starting height for fast sync.
    startingHeight := int64(tmrand.Intn(maxStartingHeightTest) + 1)

    // Generate the number of requests per peer.
    maxRequestsPerPeer := tmrand.Intn(maxRequestsPerPeerTest) + 1

    // Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
    maxPendingRequests := tmrand.Intn(maxTotalPendingRequestsTest-maxRequestsPerPeer) + maxRequestsPerPeer

    // Generate the number of blocks to be synced.
    numBlocks := int64(tmrand.Intn(maxNumBlocksInChainTest)) + startingHeight

    // Generate a number of peers.
    numPeers := tmrand.Intn(maxNumPeersTest) + 1

    return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests)
}

func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool {
    if step.event == processedBlockEv {
        _, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
        if err == errMissingBlock {
            return false
        }
        _, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
        if err == errMissingBlock {
            return false
        }
    }
    return true
}

func TestFSMCorrectTransitionSequences(t *testing.T) {
    tests := []testFields{
        makeCorrectTransitionSequence(1, 100, 10, true, 10, 40),
        makeCorrectTransitionSequenceWithRandomParameters(),
    }

    for _, tt := range tests {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            // Create test reactor
            testBcR := newTestReactor(tt.startingHeight)

            if tt.maxRequestsPerPeer != 0 {
                maxRequestsPerPeer = tt.maxRequestsPerPeer
            }

            for _, step := range tt.steps {
                step := step
                assert.Equal(t, step.currentState, testBcR.fsm.state.name)

                oldNumStatusRequests := testBcR.numStatusRequests
                fixBlockResponseEvStep(&step, testBcR)
                if !shouldApplyProcessedBlockEvStep(&step, testBcR) {
                    continue
                }

                fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
                assert.Equal(t, step.wantErr, fsmErr)

                if step.wantStatusReqSent {
                    assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
                } else {
                    assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
                }

                assert.Equal(t, step.wantState, testBcR.fsm.state.name)
                if step.wantState == "finished" {
                    assert.True(t, testBcR.fsm.isCaughtUp())
                }
            }
        })
    }
}

// ----------------------------------------
// testReactor implements the bcRNotifier interface
func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) {
    testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err)
    testR.lastPeerError.peerID = peerID
    testR.lastPeerError.err = err
}

func (testR *testReactor) sendStatusRequest() {
    testR.logger.Info("Reactor received sendStatusRequest call from FSM")
    testR.numStatusRequests++
}

func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
    testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height)
    testR.numBlockRequests++
    testR.lastBlockRequest.peerID = peerID
    testR.lastBlockRequest.height = height
    if height == 9999999 {
        // simulate that the switch does not have the peer
        return errNilPeerForBlockRequest
    }
    return nil
}

func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
    testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout)
    if _, ok := testR.stateTimerStarts[name]; !ok {
        testR.stateTimerStarts[name] = 1
    } else {
        testR.stateTimerStarts[name]++
    }
}

func (testR *testReactor) switchToConsensus() {
}

// ----------------------------------------
blockchain/v1/reactor_test.go (new file)
@@ -0,0 +1,390 @@
package v1

import (
    "fmt"
    "os"
    "sort"
    "sync"
    "testing"
    "time"

    "github.com/gogo/protobuf/proto"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    dbm "github.com/tendermint/tm-db"

    abci "github.com/tendermint/tendermint/abci/types"
    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/mempool/mock"
    "github.com/tendermint/tendermint/p2p"
    bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
    tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
    "github.com/tendermint/tendermint/proxy"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
    tmtime "github.com/tendermint/tendermint/types/time"
)

var config *cfg.Config

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
    validators := make([]types.GenesisValidator, numValidators)
    privValidators := make([]types.PrivValidator, numValidators)
    for i := 0; i < numValidators; i++ {
        val, privVal := types.RandValidator(randPower, minPower)
        validators[i] = types.GenesisValidator{
            PubKey: val.PubKey,
            Power:  val.VotingPower,
        }
        privValidators[i] = privVal
    }
    sort.Sort(types.PrivValidatorsByAddress(privValidators))

    return &types.GenesisDoc{
        GenesisTime: tmtime.Now(),
        ChainID:     config.ChainID(),
        Validators:  validators,
    }, privValidators
}

func makeVote(
    t *testing.T,
    header *types.Header,
    blockID types.BlockID,
    valset *types.ValidatorSet,
    privVal types.PrivValidator) *types.Vote {

    pubKey, err := privVal.GetPubKey()
    require.NoError(t, err)

    valIdx, _ := valset.GetByAddress(pubKey.Address())
    vote := &types.Vote{
        ValidatorAddress: pubKey.Address(),
        ValidatorIndex:   valIdx,
        Height:           header.Height,
        Round:            1,
        Timestamp:        tmtime.Now(),
        Type:             tmproto.PrecommitType,
        BlockID:          blockID,
    }

    vpb := vote.ToProto()

    _ = privVal.SignVote(header.ChainID, vpb)
    vote.Signature = vpb.Signature

    return vote
}

type BlockchainReactorPair struct {
    bcR  *BlockchainReactor
    conR *consensusReactorTest
}

func newBlockchainReactor(
    t *testing.T,
    logger log.Logger,
    genDoc *types.GenesisDoc,
    privVals []types.PrivValidator,
    maxBlockHeight int64) *BlockchainReactor {
    if len(privVals) != 1 {
        panic("only support one validator")
    }

    app := &testApp{}
    cc := proxy.NewLocalClientCreator(app)
    proxyApp := proxy.NewAppConns(cc)
    err := proxyApp.Start()
    if err != nil {
        panic(fmt.Errorf("error start app: %w", err))
    }

    blockDB := dbm.NewMemDB()
    stateDB := dbm.NewMemDB()
    stateStore := sm.NewStore(stateDB, sm.StoreOptions{
        DiscardABCIResponses: false,
    })
    blockStore := store.NewBlockStore(blockDB)

    state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
    if err != nil {
        panic(fmt.Errorf("error constructing state from genesis file: %w", err))
    }

    // Make the BlockchainReactor itself.
    // NOTE we have to create and commit the blocks first because
    // pool.height is determined from the store.
    fastSync := true
    db := dbm.NewMemDB()
    stateStore = sm.NewStore(db, sm.StoreOptions{
        DiscardABCIResponses: false,
    })
    blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
        mock.Mempool{}, sm.EmptyEvidencePool{})
    if err = stateStore.Save(state); err != nil {
        panic(err)
    }

    // let's add some blocks in
    for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
        lastCommit := types.NewCommit(blockHeight-1, 1, types.BlockID{}, nil)
        if blockHeight > 1 {
            lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
            lastBlock := blockStore.LoadBlock(blockHeight - 1)

            vote := makeVote(t, &lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
            lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
        }

        thisBlock := makeBlock(blockHeight, state, lastCommit)

        thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
        blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

        state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
        if err != nil {
            panic(fmt.Errorf("error apply block: %w", err))
        }

        blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
    }

    bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
    bcReactor.SetLogger(logger.With("module", "blockchain"))

    return bcReactor
}

func newBlockchainReactorPair(
    t *testing.T,
    logger log.Logger,
    genDoc *types.GenesisDoc,
    privVals []types.PrivValidator,
    maxBlockHeight int64) BlockchainReactorPair {

    consensusReactor := &consensusReactorTest{}
    consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor)

    return BlockchainReactorPair{
        newBlockchainReactor(t, logger, genDoc, privVals, maxBlockHeight),
        consensusReactor}
}

type consensusReactorTest struct {
    p2p.BaseReactor // BaseService + p2p.Switch
    switchedToConsensus bool
    mtx                 sync.Mutex
}

func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced bool) {
    conR.mtx.Lock()
    defer conR.mtx.Unlock()
    conR.switchedToConsensus = true
}

func TestFastSyncNoBlockResponse(t *testing.T) {
    config = cfg.ResetTestRoot("blockchain_new_reactor_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(1, false, 30)

    maxBlockHeight := int64(65)

    reactorPairs := make([]BlockchainReactorPair, 2)

    logger := log.TestingLogger()
    reactorPairs[0] = newBlockchainReactorPair(t, logger, genDoc, privVals, maxBlockHeight)
    reactorPairs[1] = newBlockchainReactorPair(t, logger, genDoc, privVals, 0)

    p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
        s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
        s.AddReactor("CONSENSUS", reactorPairs[i].conR)
        moduleName := fmt.Sprintf("blockchain-%v", i)
        reactorPairs[i].bcR.SetLogger(logger.With("module", moduleName))

        return s
    }, p2p.Connect2Switches)

    defer func() {
        for _, r := range reactorPairs {
            _ = r.bcR.Stop()
            _ = r.conR.Stop()
        }
    }()

    tests := []struct {
        height   int64
        existent bool
    }{
        {maxBlockHeight + 2, false},
        {10, true},
        {1, true},
        {maxBlockHeight + 100, false},
    }

    for {
        time.Sleep(10 * time.Millisecond)
        reactorPairs[1].conR.mtx.Lock()
        if reactorPairs[1].conR.switchedToConsensus {
            reactorPairs[1].conR.mtx.Unlock()
            break
        }
        reactorPairs[1].conR.mtx.Unlock()
    }

    assert.Equal(t, maxBlockHeight, reactorPairs[0].bcR.store.Height())

    for _, tt := range tests {
        block := reactorPairs[1].bcR.store.LoadBlock(tt.height)
        if tt.existent {
            assert.True(t, block != nil)
        } else {
            assert.True(t, block == nil)
        }
    }
}

// NOTE: This is too hard to test without an easy way to add a test peer to the
// switch, or without significant refactoring of the module. Alternatively we
// could actually dial a TCP connection, but that seems extreme.
func TestFastSyncBadBlockStopsPeer(t *testing.T) {
    numNodes := 4
    maxBlockHeight := int64(148)

    config = cfg.ResetTestRoot("blockchain_reactor_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(1, false, 30)

    otherChain := newBlockchainReactorPair(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
    defer func() {
        _ = otherChain.bcR.Stop()
        _ = otherChain.conR.Stop()
    }()

    reactorPairs := make([]BlockchainReactorPair, numNodes)
    logger := make([]log.Logger, numNodes)

    for i := 0; i < numNodes; i++ {
        logger[i] = log.TestingLogger()
        height := int64(0)
        if i == 0 {
            height = maxBlockHeight
        }
        reactorPairs[i] = newBlockchainReactorPair(t, logger[i], genDoc, privVals, height)
    }

    switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch {
        reactorPairs[i].conR.mtx.Lock()
        s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
        s.AddReactor("CONSENSUS", reactorPairs[i].conR)
        moduleName := fmt.Sprintf("blockchain-%v", i)
        reactorPairs[i].bcR.SetLogger(logger[i].With("module", moduleName))
        reactorPairs[i].conR.mtx.Unlock()
        return s
    }, p2p.Connect2Switches)

    defer func() {
        for _, r := range reactorPairs {
            _ = r.bcR.Stop()
            _ = r.conR.Stop()
        }
    }()

outerFor:
    for {
        time.Sleep(10 * time.Millisecond)
        for i := 0; i < numNodes; i++ {
            reactorPairs[i].conR.mtx.Lock()
            if !reactorPairs[i].conR.switchedToConsensus {
                reactorPairs[i].conR.mtx.Unlock()
                continue outerFor
            }
            reactorPairs[i].conR.mtx.Unlock()
        }
        break
    }

    // at this point all reactor pairs have caught up
    assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size())

    // turn the last reactor pair into an invalid peer by giving it the other chain's store
    reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store

    lastLogger := log.TestingLogger()
    lastReactorPair := newBlockchainReactorPair(t, lastLogger, genDoc, privVals, 0)
    reactorPairs = append(reactorPairs, lastReactorPair)

    switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
        s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].bcR)
        s.AddReactor("CONSENSUS", reactorPairs[len(reactorPairs)-1].conR)
        moduleName := fmt.Sprintf("blockchain-%v", len(reactorPairs)-1)
        reactorPairs[len(reactorPairs)-1].bcR.SetLogger(lastLogger.With("module", moduleName))
        return s
    }, p2p.Connect2Switches)...)

    for i := 0; i < len(reactorPairs)-1; i++ {
        p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
    }

    for {
        time.Sleep(1 * time.Second)
        lastReactorPair.conR.mtx.Lock()
        if lastReactorPair.conR.switchedToConsensus {
            lastReactorPair.conR.mtx.Unlock()
            break
        }
        lastReactorPair.conR.mtx.Unlock()

        if lastReactorPair.bcR.Switch.Peers().Size() == 0 {
            break
        }
    }

    assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
}

func TestLegacyReactorReceiveBasic(t *testing.T) {
    config = cfg.ResetTestRoot("blockchain_reactor_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(1, false, 30)
    reactor := newBlockchainReactor(t, log.TestingLogger(), genDoc, privVals, 10)
    peer := p2p.CreateRandomPeer(false)

    reactor.InitPeer(peer)
    reactor.AddPeer(peer)
    m := &bcproto.StatusRequest{}
    wm := m.Wrap()
    msg, err := proto.Marshal(wm)
    assert.NoError(t, err)

    assert.NotPanics(t, func() {
        reactor.Receive(BlockchainChannel, peer, msg)
    })
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
    for i := 0; i < 10; i++ {
        txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
    }
    return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
    block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
    return block
}

type testApp struct {
    abci.BaseApplication
}
blockchain/v2/io.go
@@ -1,27 +1,21 @@
 package v2
 
 import (
-    "errors"
     "fmt"
 
     bc "github.com/tendermint/tendermint/blockchain"
     "github.com/tendermint/tendermint/p2p"
     bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
     "github.com/tendermint/tendermint/state"
     "github.com/tendermint/tendermint/types"
 )
 
-var (
-    errPeerQueueFull = errors.New("peer queue full")
-    errNoPeer        = errors.New("peer not found")
-)
-
 type iIO interface {
     sendBlockRequest(peerID p2p.ID, height int64) error
     sendBlockToPeer(block *types.Block, peerID p2p.ID) error
     sendBlockNotFound(height int64, peerID p2p.ID) error
     sendStatusResponse(base, height int64, peerID p2p.ID) error
 
-    broadcastStatusRequest() error
+    broadcastStatusRequest()
 
     trySwitchToConsensus(state state.State, skipWAL bool) bool
 }
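
// All of the send paths below migrate to the same envelope-based pattern
// (a sketch assembled from the hunks in this diff; peer and sio are values
// from the surrounding switchIO methods):
//
//    if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
//        ChannelID: BlockchainChannel,
//        Message:   &bcproto.BlockRequest{Height: height},
//    }, sio.sw.Logger); !queued {
//        return fmt.Errorf("send queue full")
//    }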
@@ -50,16 +44,13 @@ type consensusReactor interface {
 func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error {
     peer := sio.sw.Peers().Get(peerID)
     if peer == nil {
-        return errNoPeer
+        return fmt.Errorf("peer not found")
     }
-    msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
-    if err != nil {
-        return err
-    }
-
-    queued := peer.TrySend(BlockchainChannel, msgBytes)
-    if !queued {
-        return errPeerQueueFull
+    if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
+        ChannelID: BlockchainChannel,
+        Message:   &bcproto.BlockRequest{Height: height},
+    }, sio.sw.Logger); !queued {
+        return fmt.Errorf("send queue full")
     }
     return nil
 }
@@ -67,16 +58,14 @@ func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error {
 func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID) error {
     peer := sio.sw.Peers().Get(peerID)
     if peer == nil {
-        return errNoPeer
+        return fmt.Errorf("peer not found")
     }
 
-    msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base})
-    if err != nil {
-        return err
-    }
-
-    if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
-        return errPeerQueueFull
+    if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
+        ChannelID: BlockchainChannel,
+        Message:   &bcproto.StatusRequest{},
+    }, sio.sw.Logger); !queued {
+        return fmt.Errorf("peer queue full")
     }
 
     return nil
@@ -85,7 +74,7 @@ func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID)
 func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
     peer := sio.sw.Peers().Get(peerID)
     if peer == nil {
-        return errNoPeer
+        return fmt.Errorf("peer not found")
     }
     if block == nil {
         panic("trying to send nil block")
@@ -96,12 +85,11 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
         return err
     }
 
-    msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb})
-    if err != nil {
-        return err
-    }
-    if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
-        return errPeerQueueFull
+    if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
+        ChannelID: BlockchainChannel,
+        Message:   &bcproto.BlockResponse{Block: bpb},
+    }, sio.sw.Logger); !queued {
+        return fmt.Errorf("peer queue full")
     }
 
     return nil
@@ -110,15 +98,13 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
 func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error {
     peer := sio.sw.Peers().Get(peerID)
     if peer == nil {
-        return errNoPeer
+        return fmt.Errorf("peer not found")
     }
-    msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height})
-    if err != nil {
-        return err
-    }
-
-    if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
-        return errPeerQueueFull
+    if queued := p2p.TrySendEnvelopeShim(peer, p2p.Envelope{ //nolint: staticcheck
+        ChannelID: BlockchainChannel,
+        Message:   &bcproto.NoBlockResponse{Height: height},
+    }, sio.sw.Logger); !queued {
+        return fmt.Errorf("peer queue full")
     }
 
     return nil
@@ -132,14 +118,10 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool
     return ok
 }
 
-func (sio *switchIO) broadcastStatusRequest() error {
-    msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
-    if err != nil {
-        return err
-    }
-
+func (sio *switchIO) broadcastStatusRequest() {
     // XXX: maybe we should use an io specific peer list here
-    sio.sw.Broadcast(BlockchainChannel, msgBytes)
-
-    return nil
+    sio.sw.BroadcastEnvelope(p2p.Envelope{
+        ChannelID: BlockchainChannel,
+        Message:   &bcproto.StatusRequest{},
+    })
 }

blockchain/v2/reactor.go
@@ -5,6 +5,8 @@ import (
     "fmt"
     "time"
 
+    "github.com/gogo/protobuf/proto"
+
     "github.com/tendermint/tendermint/behaviour"
     bc "github.com/tendermint/tendermint/blockchain"
     "github.com/tendermint/tendermint/libs/log"
@@ -47,6 +49,11 @@ type BlockchainReactor struct {
     store blockStore
 }
 
+//nolint:unused,deadcode
+type blockVerifier interface {
+    VerifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
+}
+
 type blockApplier interface {
     ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
 }
@@ -210,7 +217,7 @@ type bcBlockResponse struct {
     priorityNormal
     time   time.Time
     peerID p2p.ID
-    size   int64
+    size   int
     block  *types.Block
 }
@@ -344,9 +351,7 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
     case <-doProcessBlockCh:
         r.processor.send(rProcessBlock{})
     case <-doStatusCh:
-        if err := r.io.broadcastStatusRequest(); err != nil {
-            r.logger.Error("Error broadcasting status request", "err", err)
-        }
+        r.io.broadcastStatusRequest()
 
     // Events from peers. Closing the channel signals event loop termination.
     case event, ok := <-events:
@@ -450,67 +455,55 @@ func (r *BlockchainReactor) Stop() error {
 }
 
 // Receive implements Reactor by handling different message types.
 // XXX: do not call any methods that can block or incur heavy processing.
 // https://github.com/tendermint/tendermint/issues/2888
-func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-    logger := r.logger.With("src", src.ID(), "chID", chID)
-
-    msg, err := bc.DecodeMsg(msgBytes)
-    if err != nil {
-        logger.Error("error decoding message", "err", err)
-        _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
-        return
-    }
-
-    if err = bc.ValidateMsg(msg); err != nil {
-        logger.Error("peer sent us invalid msg", "msg", msg, "err", err)
-        _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
+func (r *BlockchainReactor) ReceiveEnvelope(e p2p.Envelope) {
+    if err := bc.ValidateMsg(e.Message); err != nil {
+        r.logger.Error("peer sent us invalid msg", "peer", e.Src, "msg", e.Message, "err", err)
+        _ = r.reporter.Report(behaviour.BadMessage(e.Src.ID(), err.Error()))
         return
     }
+    r.logger.Debug("Receive", "src", e.Src.ID(), "chID", e.ChannelID, "msg", e.Message)
 
-    r.logger.Debug("Receive", "msg", msg)
-
-    switch msg := msg.(type) {
+    switch msg := e.Message.(type) {
     case *bcproto.StatusRequest:
-        if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil {
-            logger.Error("Could not send status message to src peer")
+        if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), e.Src.ID()); err != nil {
+            r.logger.Error("Could not send status message to peer", "src", e.Src)
         }
 
     case *bcproto.BlockRequest:
         block := r.store.LoadBlock(msg.Height)
         if block != nil {
-            if err = r.io.sendBlockToPeer(block, src.ID()); err != nil {
-                logger.Error("Could not send block message to src peer", "err", err)
+            if err := r.io.sendBlockToPeer(block, e.Src.ID()); err != nil {
+                r.logger.Error("Could not send block message to peer: ", err)
             }
         } else {
-            logger.Info("peer asking for a block we don't have", "height", msg.Height)
-            peerID := src.ID()
-            if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
-                logger.Error("Couldn't send block not found msg", "err", err)
+            r.logger.Info("peer asking for a block we don't have", "src", e.Src, "height", msg.Height)
+            peerID := e.Src.ID()
+            if err := r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
+                r.logger.Error("Couldn't send block not found: ", err)
             }
         }
 
     case *bcproto.StatusResponse:
         r.mtx.RLock()
         if r.events != nil {
-            r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height}
+            r.events <- bcStatusResponse{peerID: e.Src.ID(), base: msg.Base, height: msg.Height}
         }
         r.mtx.RUnlock()
 
     case *bcproto.BlockResponse:
-        r.mtx.RLock()
         bi, err := types.BlockFromProto(msg.Block)
         if err != nil {
-            logger.Error("error transitioning block from protobuf", "err", err)
-            _ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
+            r.logger.Error("error transitioning block from protobuf", "err", err)
            return
         }
+        r.mtx.RLock()
         if r.events != nil {
            r.events <- bcBlockResponse{
-                peerID: src.ID(),
+                peerID: e.Src.ID(),
                block:  bi,
-                size:   int64(len(msgBytes)),
                time:   time.Now(),
+                size:   msg.Size(),
            }
        }
        r.mtx.RUnlock()
@@ -518,12 +511,29 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
     case *bcproto.NoBlockResponse:
         r.mtx.RLock()
         if r.events != nil {
-            r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()}
+            r.events <- bcNoBlockResponse{peerID: e.Src.ID(), height: msg.Height, time: time.Now()}
         }
         r.mtx.RUnlock()
     }
 }
 
+func (r *BlockchainReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {
+    msg := &bcproto.Message{}
+    err := proto.Unmarshal(msgBytes, msg)
+    if err != nil {
+        panic(err)
+    }
+    uw, err := msg.Unwrap()
+    if err != nil {
+        panic(err)
+    }
+    r.ReceiveEnvelope(p2p.Envelope{
+        ChannelID: chID,
+        Src:       peer,
+        Message:   uw,
+    })
+}
+
 // AddPeer implements Reactor interface
 func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
     err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID())
@@ -554,10 +564,11 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: BlockchainChannel,
|
||||
Priority: 10,
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 2000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
MessageType: &bcproto.Message{},
|
||||
},
|
||||
}
|
||||
}
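For orientation, the new Receive above is a thin compatibility shim: it decodes the raw bytes, unwraps the protobuf oneof container, and forwards the result to the typed ReceiveEnvelope handler. Below is a minimal, self-contained sketch of that shape; the Envelope struct and decode helper are hypothetical stand-ins for p2p.Envelope and the proto.Unmarshal plus Message.Unwrap pair, not the real API.

package main

import (
	"errors"
	"fmt"
)

// Envelope mirrors the idea of p2p.Envelope: a decoded message plus routing metadata.
type Envelope struct {
	ChannelID byte
	Src       string // stand-in for a p2p.Peer
	Message   interface{}
}

// decode stands in for proto.Unmarshal followed by Message.Unwrap.
func decode(raw []byte) (interface{}, error) {
	if len(raw) == 0 {
		return nil, errors.New("empty message")
	}
	return string(raw), nil
}

// receiveEnvelope is the typed handler; receive is the legacy byte-slice shim.
func receiveEnvelope(e Envelope) {
	fmt.Printf("ch=%#x src=%s msg=%v\n", e.ChannelID, e.Src, e.Message)
}

func receive(chID byte, src string, raw []byte) {
	msg, err := decode(raw)
	if err != nil {
		panic(err) // the real shim likewise panics on undecodable input
	}
	receiveEnvelope(Envelope{ChannelID: chID, Src: src, Message: msg})
}

func main() {
	receive(0x40, "P1", []byte("status-request"))
}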
@@ -9,13 +9,13 @@ import (
 	"testing"
 	"time"

+	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	dbm "github.com/tendermint/tm-db"

 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/behaviour"
 	bc "github.com/tendermint/tendermint/blockchain"
 	cfg "github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
@@ -53,31 +53,19 @@ func (mp mockPeer) NodeInfo() p2p.NodeInfo {
 func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
 func (mp mockPeer) SocketAddr() *p2p.NetAddress   { return &p2p.NetAddress{} }

+func (mp mockPeer) SendEnvelope(e p2p.Envelope) bool    { return true }
+func (mp mockPeer) TrySendEnvelope(e p2p.Envelope) bool { return true }

 func (mp mockPeer) Send(byte, []byte) bool    { return true }
 func (mp mockPeer) TrySend(byte, []byte) bool { return true }

 func (mp mockPeer) Set(string, interface{}) {}
 func (mp mockPeer) Get(string) interface{}  { return struct{}{} }

-//nolint:unused
-type mockBlockStore struct {
-	blocks map[int64]*types.Block
-}
+func (mp mockPeer) SetRemovalFailed()      {}
+func (mp mockPeer) GetRemovalFailed() bool { return false }

 func (ml *mockBlockStore) Height() int64 {
 	return int64(len(ml.blocks))
 }

 func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
 	return ml.blocks[height]
 }

 func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
 	ml.blocks[block.Height] = block
 }

-type mockBlockApplier struct {
-}
+type mockBlockApplier struct{}

 // XXX: Add whitelist/blacklist?
 func (mba *mockBlockApplier) ApplyBlock(
@@ -127,8 +115,7 @@ func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool
 	return true
 }

-func (sio *mockSwitchIo) broadcastStatusRequest() error {
-	return nil
+func (sio *mockSwitchIo) broadcastStatusRequest() {
 }

 type testReactorParams struct {
@@ -156,7 +143,9 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
 		panic(fmt.Errorf("error start app: %w", err))
 	}
 	db := dbm.NewMemDB()
-	stateStore := sm.NewStore(db)
+	stateStore := sm.NewStore(db, sm.StoreOptions{
+		DiscardABCIResponses: false,
+	})
 	appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
 	if err = stateStore.Save(state); err != nil {
 		panic(err)
@@ -347,9 +336,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor {
 // }

 func TestReactorHelperMode(t *testing.T) {
-	var (
-		channelID = byte(0x40)
-	)
+	channelID := byte(0x40)

 	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
 	defer os.RemoveAll(config.RootDir)
@@ -365,7 +352,7 @@ func TestReactorHelperMode(t *testing.T) {

 	type testEvent struct {
 		peer  string
-		event interface{}
+		event proto.Message
 	}

 	tests := []struct {
@@ -377,10 +364,10 @@ func TestReactorHelperMode(t *testing.T) {
 			name:   "status request",
 			params: params,
 			msgs: []testEvent{
-				{"P1", bcproto.StatusRequest{}},
-				{"P1", bcproto.BlockRequest{Height: 13}},
-				{"P1", bcproto.BlockRequest{Height: 20}},
-				{"P1", bcproto.BlockRequest{Height: 22}},
+				{"P1", &bcproto.StatusRequest{}},
+				{"P1", &bcproto.BlockRequest{Height: 13}},
+				{"P1", &bcproto.BlockRequest{Height: 20}},
+				{"P1", &bcproto.BlockRequest{Height: 22}},
 			},
 		},
 	}
@@ -397,25 +384,27 @@ func TestReactorHelperMode(t *testing.T) {
 			for i := 0; i < len(tt.msgs); i++ {
 				step := tt.msgs[i]
 				switch ev := step.event.(type) {
-				case bcproto.StatusRequest:
+				case *bcproto.StatusRequest:
 					old := mockSwitch.numStatusResponse
-					msg, err := bc.EncodeMsg(&ev)
-					assert.NoError(t, err)
-					reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
+					reactor.ReceiveEnvelope(p2p.Envelope{
+						ChannelID: channelID,
+						Src:       mockPeer{id: p2p.ID(step.peer)},
+						Message:   ev})
 					assert.Equal(t, old+1, mockSwitch.numStatusResponse)
-				case bcproto.BlockRequest:
+				case *bcproto.BlockRequest:
 					if ev.Height > params.startHeight {
 						old := mockSwitch.numNoBlockResponse
-						msg, err := bc.EncodeMsg(&ev)
-						assert.NoError(t, err)
-						reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
+						reactor.ReceiveEnvelope(p2p.Envelope{
+							ChannelID: channelID,
+							Src:       mockPeer{id: p2p.ID(step.peer)},
+							Message:   ev})
 						assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
 					} else {
 						old := mockSwitch.numBlockResponse
-						msg, err := bc.EncodeMsg(&ev)
-						assert.NoError(t, err)
-						assert.NoError(t, err)
-						reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
+						reactor.ReceiveEnvelope(p2p.Envelope{
+							ChannelID: channelID,
+							Src:       mockPeer{id: p2p.ID(step.peer)},
+							Message:   ev})
 						assert.Equal(t, old+1, mockSwitch.numBlockResponse)
 					}
 				}
@@ -426,6 +415,34 @@ func TestReactorHelperMode(t *testing.T) {
 	}
 }

+func TestLegacyReactorReceiveBasic(t *testing.T) {
+	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
+	defer os.RemoveAll(config.RootDir)
+	genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
+	params := testReactorParams{
+		logger:      log.TestingLogger(),
+		genDoc:      genDoc,
+		privVals:    privVals,
+		startHeight: 20,
+		mockA:       true,
+	}
+	reactor := newTestReactor(params)
+	mockSwitch := &mockSwitchIo{switchedToConsensus: false}
+	reactor.io = mockSwitch
+	peer := p2p.CreateRandomPeer(false)
+
+	reactor.InitPeer(peer)
+	reactor.AddPeer(peer)
+	m := &bcproto.StatusRequest{}
+	wm := m.Wrap()
+	msg, err := proto.Marshal(wm)
+	assert.NoError(t, err)
+
+	assert.NotPanics(t, func() {
+		reactor.Receive(BlockchainChannel, peer, msg)
+	})
+}
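The added TestLegacyReactorReceiveBasic exercises the deprecated byte-slice path end to end: it wraps a StatusRequest in the bcproto.Message container via Wrap, marshals it with gogo protobuf, and asserts that the legacy Receive entry point handles the raw bytes without panicking.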
 func TestReactorSetSwitchNil(t *testing.T) {
 	config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
 	defer os.RemoveAll(config.RootDir)
@@ -462,7 +479,8 @@ type testApp struct {
 }

 func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
-	*types.GenesisDoc, []types.PrivValidator) {
+	*types.GenesisDoc, []types.PrivValidator,
+) {
 	validators := make([]types.GenesisValidator, numValidators)
 	privValidators := make([]types.PrivValidator, numValidators)
 	for i := 0; i < numValidators; i++ {
@@ -487,7 +505,8 @@ func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower
 func newReactorStore(
 	genDoc *types.GenesisDoc,
 	privVals []types.PrivValidator,
-	maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
+	maxBlockHeight int64,
+) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
 	if len(privVals) != 1 {
 		panic("only support one validator")
 	}
@@ -501,14 +520,19 @@ func newReactorStore(

 	stateDB := dbm.NewMemDB()
 	blockStore := store.NewBlockStore(dbm.NewMemDB())
-	stateStore := sm.NewStore(stateDB)
+	stateStore := sm.NewStore(stateDB, sm.StoreOptions{
+		DiscardABCIResponses: false,
+	})
 	state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
 	if err != nil {
 		panic(fmt.Errorf("error constructing state from genesis file: %w", err))
 	}

 	db := dbm.NewMemDB()
-	stateStore = sm.NewStore(db)
+	stateStore = sm.NewStore(db, sm.StoreOptions{
+		DiscardABCIResponses: false,
+	},
+	)
 	blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
 		mock.Mempool{}, sm.EmptyEvidencePool{})
 	if err = stateStore.Save(state); err != nil {

@@ -52,13 +52,8 @@ func (rt *Routine) setLogger(logger log.Logger) {
 	rt.logger = logger
 }

-// nolint:unused
-func (rt *Routine) setMetrics(metrics *Metrics) {
-	rt.metrics = metrics
-}
-
 func (rt *Routine) start() {
-	rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
+	rt.logger.Info("routine start", "msg", log.NewLazySprintf("%s: run", rt.name))
 	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
 	if !running {
 		panic(fmt.Sprintf("%s is already running", rt.name))
@@ -98,7 +93,7 @@ func (rt *Routine) start() {
 				return
 			}
 			rt.metrics.EventsOut.With("routine", rt.name).Add(1)
-			rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))
+			rt.logger.Debug("routine start", "msg", log.NewLazySprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))

 			// Skip rTrySchedule and rProcessBlock events as they clutter the history
 			// due to their frequency.
@@ -118,7 +113,7 @@ func (rt *Routine) start() {

 // XXX: look into returning OpError in the net package
 func (rt *Routine) send(event Event) bool {
-	rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
+	rt.logger.Debug("routine send", "msg", log.NewLazySprintf("%s: received %T %+v", rt.name, event, event))
 	if !rt.isRunning() {
 		return false
 	}
@@ -150,7 +145,7 @@ func (rt *Routine) stop() {
 		return
 	}

-	rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
+	rt.logger.Info("routine stop", "msg", log.NewLazySprintf("%s: stop", rt.name))
 	rt.queue.Dispose() // this should block until all queue items are free?
 }
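The logging rewrites in this routine are not just cosmetic: log.NewLazySprintf hands the logger a value that only formats itself when the log line is actually emitted, so disabled Debug output no longer pays the fmt.Sprintf cost. A minimal sketch of the underlying idea follows; the type and constructor names here are stand-ins, not the libs/log implementation.

package main

import "fmt"

// lazySprintf defers fmt.Sprintf until String() is called, so a logger that
// filters out the line never performs the formatting work.
type lazySprintf struct {
	format string
	args   []interface{}
}

func (l *lazySprintf) String() string { return fmt.Sprintf(l.format, l.args...) }

func newLazySprintf(format string, args ...interface{}) *lazySprintf {
	return &lazySprintf{format: format, args: args}
}

func main() {
	msg := newLazySprintf("%s: run", "scheduler")
	fmt.Println(msg) // fmt invokes String() only when the value is actually printed
}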
@@ -366,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
 }

 // CONTRACT: peer exists and in Ready state.
-func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
+func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int, now time.Time) error {
 	peer := sc.peers[peerID]

 	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -379,7 +379,7 @@ func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now t
 			height, pendingTime, now)
 	}

-	peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()
+	peer.lastRate = int64(size) / now.Sub(pendingTime).Nanoseconds()

 	sc.setStateAtHeight(height, blockStateReceived)
 	delete(sc.pendingBlocks, height)
@@ -532,7 +532,7 @@ func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
 		return noOp, nil
 	}

-	err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
+	err = sc.markReceived(event.peerID, event.block.Height, event.block.Size(), event.time)
 	if err != nil {
 		sc.removePeer(event.peerID)
 		return scPeerError{peerID: event.peerID, reason: err}, nil
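Note the knock-on effect of the envelope change: the block size now comes from the protobuf-reported size (msg.Size() returns an int) rather than the raw wire length, so markReceived takes an int and converts to int64 only for the rate arithmetic. A small sketch of that computation, under the same integer-division semantics (the function below is a stand-in, not the scheduler itself):

package main

import (
	"fmt"
	"time"
)

// lastRate mirrors the scheduler's computation: bytes divided by elapsed
// nanoseconds using integer division, which truncates toward zero for
// small payloads over long intervals.
func lastRate(size int, pendingTime, now time.Time) int64 {
	return int64(size) / now.Sub(pendingTime).Nanoseconds()
}

func main() {
	pending := time.Date(2023, 1, 1, 0, 0, 0, 0, time.UTC)
	received := pending.Add(50 * time.Nanosecond)
	fmt.Println(lastRate(100, pending, received)) // 100 bytes / 50 ns = 2
}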
@@ -853,7 +853,7 @@ func TestScMarkReceived(t *testing.T) {
 	type args struct {
 		peerID p2p.ID
 		height int64
-		size   int64
+		size   int
 		tm     time.Time
 	}
 	tests := []struct {
@@ -1373,9 +1373,6 @@ func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, even
 		t.Errorf("error = %v, wantErr %v", err, wantErr)
 		return
 	}
-	if !assert.IsType(t, wantEvent, event) {
-		t.Log(fmt.Sprintf("Wrong type received, got: %v", event))
-	}
 	switch wantEvent := wantEvent.(type) {
 	case scPeerError:
 		assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID)
@@ -1459,7 +1456,7 @@ func TestScHandleBlockResponse(t *testing.T) {
 				pendingTime: map[int64]time.Time{6: now},
 			},
 			args:      args{event: block6FromP1},
-			wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block},
+			wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(6)},
 		},
 	}
@@ -2046,8 +2043,6 @@ func TestScHandle(t *testing.T) {
 		priorityNormal
 	}

-	block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3)
-
 	t0 := time.Now()
 	tick := make([]time.Time, 100)
 	for i := range tick {
@@ -2136,8 +2131,8 @@ func TestScHandle(t *testing.T) {
 			},
 		},
 		{ // block response 1
-			args:      args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}},
-			wantEvent: scBlockReceived{peerID: "P1", block: block1},
+			args:      args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: makeScBlock(1)}},
+			wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(1)},
 			wantSc: &scTestParams{
 				startTime: now,
 				peers:     map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
@@ -2149,8 +2144,8 @@ func TestScHandle(t *testing.T) {
 			},
 		},
 		{ // block response 2
-			args:      args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}},
-			wantEvent: scBlockReceived{peerID: "P1", block: block2},
+			args:      args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: makeScBlock(2)}},
+			wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(2)},
 			wantSc: &scTestParams{
 				startTime: now,
 				peers:     map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
@@ -2162,8 +2157,8 @@ func TestScHandle(t *testing.T) {
 			},
 		},
 		{ // block response 3
-			args:      args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}},
-			wantEvent: scBlockReceived{peerID: "P1", block: block3},
+			args:      args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: makeScBlock(3)}},
+			wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(3)},
 			wantSc: &scTestParams{
 				startTime: now,
 				peers:     map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
buf.gen.yaml
@@ -1,13 +1,9 @@
-# The version of the generation template.
-# Required.
-# The only currently-valid value is v1beta1.
-version: v1beta1
-
-# The plugins to run.
+version: v1
 plugins:
-  # The name of the plugin.
   - name: gogofaster
-    # The the relative output directory.
-    out: proto
-    # Any options to provide to the plugin.
-    opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
+    out: ./proto/
+    opt:
+      - Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types
+      - Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration
+      - plugins=grpc
+      - paths=source_relative

buf.work.yaml (new file)
@@ -0,0 +1,3 @@
+version: v1
+directories:
+  - proto
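The new buf.work.yaml declares a v1 buf workspace whose only module directory is proto, and buf.gen.yaml moves to the v1 template with the plugin options split into a list. With both files in place, regeneration should be a plain `buf generate` from the repository root, assuming a buf CLI recent enough to understand v1 configuration.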
cmd/tendermint/commands/compact.go (new file)
@@ -0,0 +1,66 @@
+package commands
+
+import (
+	"errors"
+	"path/filepath"
+	"sync"
+
+	"github.com/spf13/cobra"
+	"github.com/syndtr/goleveldb/leveldb"
+	"github.com/syndtr/goleveldb/leveldb/opt"
+	"github.com/syndtr/goleveldb/leveldb/util"
+
+	"github.com/tendermint/tendermint/libs/log"
+)
+
+var CompactGoLevelDBCmd = &cobra.Command{
+	Use:   "experimental-compact-goleveldb",
+	Short: "force compacts the tendermint storage engine (only GoLevelDB supported)",
+	Long: `
+This is a temporary utility command that performs a force compaction on the state
+and blockstores to reduce disk space for a pruning node. This should only be run
+once the node has stopped. This command will likely be omitted in the future after
+the planned refactor to the storage engine.
+
+Currently, only GoLevelDB is supported.
+	`,
+	RunE: func(cmd *cobra.Command, args []string) error {
+		if config.DBBackend != "goleveldb" {
+			return errors.New("compaction is currently only supported with goleveldb")
+		}
+
+		compactGoLevelDBs(config.RootDir, logger)
+		return nil
+	},
+}
+
+func compactGoLevelDBs(rootDir string, logger log.Logger) {
+	dbNames := []string{"state", "blockstore"}
+	o := &opt.Options{
+		DisableSeeksCompaction: true,
+	}
+	wg := sync.WaitGroup{}
+
+	for _, dbName := range dbNames {
+		dbName := dbName
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+			dbPath := filepath.Join(rootDir, "data", dbName+".db")
+			store, err := leveldb.OpenFile(dbPath, o)
+			if err != nil {
+				logger.Error("failed to initialize tendermint db", "path", dbPath, "err", err)
+				return
+			}
+			defer store.Close()
+
+			logger.Info("starting compaction...", "db", dbPath)
+
+			err = store.CompactRange(util.Range{Start: nil, Limit: nil})
+			if err != nil {
+				logger.Error("failed to compact tendermint db", "path", dbPath, "err", err)
+			}
+		}()
+	}
+	wg.Wait()
+}
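Once a node is stopped, the command is invoked as tendermint experimental-compact-goleveldb; it compacts the state and blockstore databases concurrently and is explicitly gated on the goleveldb backend. A minimal sketch of wiring it into a cobra root command, assuming a tendermint version that ships this file (the bare root command below is a stand-in for the real one in cmd/tendermint):

package main

import (
	"os"

	"github.com/spf13/cobra"

	"github.com/tendermint/tendermint/cmd/tendermint/commands"
)

func main() {
	// Stand-in root; the real binary registers CompactGoLevelDBCmd
	// alongside its other subcommands.
	root := &cobra.Command{Use: "tendermint"}
	root.AddCommand(commands.CompactGoLevelDBCmd)
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}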
@@ -3,7 +3,6 @@ package debug
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"path/filepath"
 	"time"
@@ -82,7 +81,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error {
 func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) {
 	start := time.Now().UTC()

-	tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp")
+	tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
 	if err != nil {
 		logger.Error("failed to create temporary directory", "dir", tmpDir, "error", err)
 		return

@@ -5,7 +5,6 @@ import (
 	"encoding/json"
 	"fmt"
 	"io"
-	"io/ioutil"
 	"os"
 	"path"
 	"path/filepath"
@@ -68,7 +67,6 @@ func zipDir(src, dest string) error {
 		_, err = io.Copy(headerWriter, file)
 		return err
 	})
-
 }

 // copyFile copies a file from src to dest and returns an error upon failure. The
@@ -111,5 +109,5 @@ func writeStateJSONToFile(state interface{}, dir, filename string) error {
 		return fmt.Errorf("failed to encode state dump: %w", err)
 	}

-	return ioutil.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
+	return os.WriteFile(path.Join(dir, filename), stateJSON, os.ModePerm)
 }

@@ -3,7 +3,6 @@ package debug
 import (
 	"errors"
 	"fmt"
-	"io/ioutil"
 	"os"
 	"os/exec"
 	"path/filepath"
@@ -56,7 +55,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {

 	// Create a temporary directory which will contain all the state dumps and
 	// relevant files and directories that will be compressed into a file.
-	tmpDir, err := ioutil.TempDir(os.TempDir(), "tendermint_debug_tmp")
+	tmpDir, err := os.MkdirTemp(os.TempDir(), "tendermint_debug_tmp")
 	if err != nil {
 		return fmt.Errorf("failed to create temporary directory: %w", err)
 	}
@@ -105,7 +104,7 @@ func killProc(pid uint64, dir string) error {
 	// pipe STDERR output from tailing the Tendermint process to a file
 	//
 	// NOTE: This will only work on UNIX systems.
-	cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) // nolint: gosec
+	cmd := exec.Command("tail", "-f", fmt.Sprintf("/proc/%d/fd/2", pid)) //nolint: gosec

 	outFile, err := os.Create(filepath.Join(dir, "stacktrace.out"))
 	if err != nil {

@@ -3,7 +3,7 @@ package debug
 import (
 	"context"
 	"fmt"
-	"io/ioutil"
+	"io"
 	"net/http"
 	"os"
 	"path"
@@ -67,16 +67,17 @@ func copyConfig(home, dir string) error {
 func dumpProfile(dir, addr, profile string, debug int) error {
 	endpoint := fmt.Sprintf("%s/debug/pprof/%s?debug=%d", addr, profile, debug)

-	resp, err := http.Get(endpoint) // nolint: gosec
+	//nolint:gosec,nolintlint
+	resp, err := http.Get(endpoint)
 	if err != nil {
 		return fmt.Errorf("failed to query for %s profile: %w", profile, err)
 	}
 	defer resp.Body.Close()

-	body, err := ioutil.ReadAll(resp.Body)
+	body, err := io.ReadAll(resp.Body)
 	if err != nil {
 		return fmt.Errorf("failed to read %s profile response body: %w", profile, err)
 	}

-	return ioutil.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
+	return os.WriteFile(path.Join(dir, fmt.Sprintf("%s.out", profile)), body, os.ModePerm)
 }
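The debug-command hunks above are part of the Go 1.16 io/ioutil deprecation cleanup: ioutil.TempDir becomes os.MkdirTemp, ioutil.ReadAll becomes io.ReadAll, and ioutil.WriteFile becomes os.WriteFile. A self-contained sketch of the replacement APIs:

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
)

func main() {
	// os.MkdirTemp replaces ioutil.TempDir.
	dir, err := os.MkdirTemp("", "debug_demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// io.ReadAll replaces ioutil.ReadAll.
	body, err := io.ReadAll(strings.NewReader("profile data"))
	if err != nil {
		panic(err)
	}

	// os.WriteFile replaces ioutil.WriteFile.
	out := filepath.Join(dir, "profile.out")
	if err := os.WriteFile(out, body, 0o600); err != nil {
		panic(err)
	}
	fmt.Println("wrote", out)
}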
@@ -5,28 +5,30 @@ import (

 	"github.com/spf13/cobra"

-	tmjson "github.com/tendermint/tendermint/libs/json"
+	tmos "github.com/tendermint/tendermint/libs/os"
 	"github.com/tendermint/tendermint/p2p"
 )

-// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
-// NodeKey to the standard output.
+// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
+// the standard output.
 var GenNodeKeyCmd = &cobra.Command{
-	Use:   "gen_node_key",
-	Short: "Generate new node key",
-	RunE:  genNodeKey,
+	Use:     "gen-node-key",
+	Aliases: []string{"gen_node_key"},
+	Short:   "Generate a node key for this node and print its ID",
+	PreRun:  deprecateSnakeCase,
+	RunE:    genNodeKey,
 }

 func genNodeKey(cmd *cobra.Command, args []string) error {
-	nodeKey := p2p.GenNodeKey()
-
-	bz, err := tmjson.Marshal(nodeKey)
-	if err != nil {
-		return fmt.Errorf("nodeKey -> json: %w", err)
+	nodeKeyFile := config.NodeKeyFile()
+	if tmos.FileExists(nodeKeyFile) {
+		return fmt.Errorf("node key at %s already exists", nodeKeyFile)
 	}

-	fmt.Printf(`%v
-`, string(bz))
-
-	return nil
+	nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile)
+	if err != nil {
+		return err
+	}
+	fmt.Println(nodeKey.ID())
+	return nil
 }
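Net effect of this hunk: tendermint gen-node-key (with the snake_case spelling retained as a deprecated alias) no longer prints a JSON-encoded key to stdout. It now generates or loads the key at the path returned by config.NodeKeyFile(), refuses to overwrite an existing file, and prints only the resulting node ID.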
@@ -7,35 +7,24 @@ import (

 	tmjson "github.com/tendermint/tendermint/libs/json"
 	"github.com/tendermint/tendermint/privval"
-	"github.com/tendermint/tendermint/types"
 )

 // GenValidatorCmd allows the generation of a keypair for a
 // validator.
 var GenValidatorCmd = &cobra.Command{
-	Use:   "gen_validator",
-	Short: "Generate new validator keypair",
-	RunE:  genValidator,
+	Use:     "gen-validator",
+	Aliases: []string{"gen_validator"},
+	Short:   "Generate new validator keypair",
+	PreRun:  deprecateSnakeCase,
+	Run:     genValidator,
 }

-func init() {
-	GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
-		"Key type to generate privval file with. Options: ed25519, secp256k1")
-}
-
-func genValidator(cmd *cobra.Command, args []string) error {
-	pv, err := privval.GenFilePV("", "", keyType)
-	if err != nil {
-		return err
-	}
-
+func genValidator(cmd *cobra.Command, args []string) {
+	pv := privval.GenFilePV("", "")
 	jsbz, err := tmjson.Marshal(pv)
 	if err != nil {
-		return fmt.Errorf("validator -> json: %w", err)
+		panic(err)
 	}

 	fmt.Printf(`%v
`, string(jsbz))
-
-	return nil
 }

@@ -10,7 +10,6 @@ import (
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 	"github.com/tendermint/tendermint/p2p"
 	"github.com/tendermint/tendermint/privval"
-	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	"github.com/tendermint/tendermint/types"
 	tmtime "github.com/tendermint/tendermint/types/time"
 )
@@ -22,15 +21,6 @@ var InitFilesCmd = &cobra.Command{
 	RunE: initFiles,
 }

-var (
-	keyType string
-)
-
-func init() {
-	InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
-		"Key type to generate privval file with. Options: ed25519, secp256k1")
-}
-
 func initFiles(cmd *cobra.Command, args []string) error {
 	return initFilesWithConfig(config)
 }
@@ -39,19 +29,13 @@ func initFilesWithConfig(config *cfg.Config) error {
 	// private validator
 	privValKeyFile := config.PrivValidatorKeyFile()
 	privValStateFile := config.PrivValidatorStateFile()
-	var (
-		pv  *privval.FilePV
-		err error
-	)
+	var pv *privval.FilePV
 	if tmos.FileExists(privValKeyFile) {
 		pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
 		logger.Info("Found private validator", "keyFile", privValKeyFile,
 			"stateFile", privValStateFile)
 	} else {
-		pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
-		if err != nil {
-			return err
-		}
+		pv = privval.GenFilePV(privValKeyFile, privValStateFile)
 		pv.Save()
 		logger.Info("Generated private validator", "keyFile", privValKeyFile,
 			"stateFile", privValStateFile)
@@ -72,17 +56,11 @@ func initFilesWithConfig(config *cfg.Config) error {
 	if tmos.FileExists(genFile) {
 		logger.Info("Found genesis file", "path", genFile)
 	} else {
-
 		genDoc := types.GenesisDoc{
 			ChainID:         fmt.Sprintf("test-chain-%v", tmrand.Str(6)),
 			GenesisTime:     tmtime.Now(),
 			ConsensusParams: types.DefaultConsensusParams(),
 		}
-		if keyType == "secp256k1" {
-			genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
-				PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
-			}
-		}
 		pubKey, err := pv.GetPubKey()
 		if err != nil {
 			return fmt.Errorf("can't get pubkey: %w", err)