mirror of
https://github.com/tendermint/tendermint.git
synced 2026-01-12 15:52:50 +00:00
Compare commits
278 Commits
v0.35.0-de
...
docs-stagi
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5182ffee25 | ||
|
|
a72fb2fbad | ||
|
|
68bd2116f0 | ||
|
|
670e9b427b | ||
|
|
15c1936b85 | ||
|
|
c63854f732 | ||
|
|
527550f372 | ||
|
|
64961e2267 | ||
|
|
eff1b16a0c | ||
|
|
ea77360ecf | ||
|
|
5972105b06 | ||
|
|
af723eca8a | ||
|
|
62d7a5d028 | ||
|
|
96215a06ed | ||
|
|
3ef0b90afd | ||
|
|
d34e7c5b51 | ||
|
|
d8a2eb95bb | ||
|
|
eaa948ab7d | ||
|
|
5cbb8263b4 | ||
|
|
a2bd09253c | ||
|
|
ca285844ea | ||
|
|
74bee8d834 | ||
|
|
211bc08217 | ||
|
|
178d421c77 | ||
|
|
bada08c50c | ||
|
|
956b59af87 | ||
|
|
f05788e632 | ||
|
|
5b698ed13b | ||
|
|
1d16e39c0e | ||
|
|
78e8169750 | ||
|
|
03a6fb2777 | ||
|
|
c61cd3fd05 | ||
|
|
385ea1db7d | ||
|
|
66ba12d9bc | ||
|
|
09cf0bcb01 | ||
|
|
e986602649 | ||
|
|
2c95b0b5e0 | ||
|
|
a0d4d85375 | ||
|
|
8bf77d9b1a | ||
|
|
1ccd23ca1d | ||
|
|
47f5650615 | ||
|
|
5b17c01e41 | ||
|
|
9b9222f461 | ||
|
|
0555772d3a | ||
|
|
1e1d087494 | ||
|
|
85353d9af5 | ||
|
|
1570d26f84 | ||
|
|
c75dee5a02 | ||
|
|
17ca6c6c98 | ||
|
|
46964f62db | ||
|
|
9c47b572f7 | ||
|
|
1b18d26644 | ||
|
|
d39eb74daa | ||
|
|
91bef75f62 | ||
|
|
b4ce1de44a | ||
|
|
1b6df6783d | ||
|
|
cc3c18a6a7 | ||
|
|
8e7d431f6f | ||
|
|
3c1416b3d7 | ||
|
|
aef1ac7ba5 | ||
|
|
84ff991387 | ||
|
|
bc1f1e5ffa | ||
|
|
c4730bb46a | ||
|
|
392acdc733 | ||
|
|
1128244f4f | ||
|
|
886442c111 | ||
|
|
c6691b91c2 | ||
|
|
0a41711091 | ||
|
|
77deb710fb | ||
|
|
6a056e050c | ||
|
|
bdf688debc | ||
|
|
72f041b759 | ||
|
|
8c0af72987 | ||
|
|
ced66e4eb5 | ||
|
|
be6c016664 | ||
|
|
085fd66f33 | ||
|
|
2bb2af19e4 | ||
|
|
ebff8a96a5 | ||
|
|
6ef81c6074 | ||
|
|
e198edf20e | ||
|
|
bcfc889f25 | ||
|
|
6dce4ef701 | ||
|
|
1547a7e6c1 | ||
|
|
e3b96107af | ||
|
|
5aa859c370 | ||
|
|
41c067c474 | ||
|
|
3283a84ab2 | ||
|
|
c1be58a27b | ||
|
|
a24e00dda6 | ||
|
|
d01e6998a7 | ||
|
|
d8ae450784 | ||
|
|
28e79a4d02 | ||
|
|
0565eb5943 | ||
|
|
a879eb444d | ||
|
|
cdc217357d | ||
|
|
f9c54d2710 | ||
|
|
afb50425b3 | ||
|
|
15bf3a1509 | ||
|
|
89e908e340 | ||
|
|
8e80c26e80 | ||
|
|
b35e9ff53e | ||
|
|
4dcc9e979c | ||
|
|
0d108adaec | ||
|
|
cdf1e251f7 | ||
|
|
607565f34e | ||
|
|
42041f71b5 | ||
|
|
2c16ae99ee | ||
|
|
79890d8393 | ||
|
|
5882489bba | ||
|
|
8a80b97865 | ||
|
|
9205e85a9b | ||
|
|
243ff4b43d | ||
|
|
bcb7044d64 | ||
|
|
77d7328bc6 | ||
|
|
e820e68acd | ||
|
|
33dbff61d3 | ||
|
|
f368b91caf | ||
|
|
b1bbd37519 | ||
|
|
141d9c814d | ||
|
|
e13b4386ff | ||
|
|
0de4bec862 | ||
|
|
781f4badc3 | ||
|
|
27e8cea9ce | ||
|
|
98234b1171 | ||
|
|
6f9f8b58ae | ||
|
|
85e94161cd | ||
|
|
4988877f19 | ||
|
|
ba256b383b | ||
|
|
69dd8fea9d | ||
|
|
170cb70e19 | ||
|
|
3ad1157451 | ||
|
|
095e9cd7ef | ||
|
|
2c2120691c | ||
|
|
8b29622fe2 | ||
|
|
6bee97160f | ||
|
|
64101f5ac9 | ||
|
|
3246283cf2 | ||
|
|
f97a498cee | ||
|
|
68dc751a8c | ||
|
|
b435d9aae5 | ||
|
|
e3728e7709 | ||
|
|
f2f6a78809 | ||
|
|
b2d72dce7e | ||
|
|
909da42789 | ||
|
|
4e71357808 | ||
|
|
fbf2309962 | ||
|
|
39e81807a3 | ||
|
|
2d0fcf498d | ||
|
|
deb4f60613 | ||
|
|
a736530e01 | ||
|
|
e0950515ff | ||
|
|
8aa47c7da5 | ||
|
|
2f5e454892 | ||
|
|
e9294de946 | ||
|
|
fdecfa177d | ||
|
|
8bd3d5105f | ||
|
|
95cff1efb4 | ||
|
|
eb0d353767 | ||
|
|
a399fae100 | ||
|
|
baa20a4b9c | ||
|
|
af645ac778 | ||
|
|
adcfe80f09 | ||
|
|
7041cae8e1 | ||
|
|
fa522ca323 | ||
|
|
1181cfbd77 | ||
|
|
e7d7ad85d5 | ||
|
|
b508045eff | ||
|
|
ca46cbc781 | ||
|
|
bf35cc6443 | ||
|
|
27895a27a4 | ||
|
|
9fe7b4fe77 | ||
|
|
ec32df2d8a | ||
|
|
d0c87ff27e | ||
|
|
e52f9de148 | ||
|
|
8ae5c60637 | ||
|
|
47687dba31 | ||
|
|
865234e113 | ||
|
|
fdb7421ae8 | ||
|
|
a65c23a526 | ||
|
|
335e97433c | ||
|
|
3922dde05d | ||
|
|
83c7bd17bf | ||
|
|
f471affad5 | ||
|
|
0e9798f39f | ||
|
|
627f7b5989 | ||
|
|
8e6194626e | ||
|
|
886235311f | ||
|
|
b5d9da5d89 | ||
|
|
bcf9b0aa39 | ||
|
|
cafad28293 | ||
|
|
cd41091b18 | ||
|
|
53022220f6 | ||
|
|
651d8f087b | ||
|
|
1488b0a33b | ||
|
|
3be4800810 | ||
|
|
eeb92a632b | ||
|
|
d0db59e16c | ||
|
|
38587d83c4 | ||
|
|
80b9eb8f0f | ||
|
|
7daf6a1a03 | ||
|
|
10dda219a1 | ||
|
|
d1ef5028a0 | ||
|
|
20d66803c5 | ||
|
|
50b91867c3 | ||
|
|
d11e5993b1 | ||
|
|
99f645d200 | ||
|
|
e0e006d10f | ||
|
|
d4f906609a | ||
|
|
a24a58207a | ||
|
|
8c5fe166a6 | ||
|
|
17383be202 | ||
|
|
257b34b459 | ||
|
|
9e6248c0d7 | ||
|
|
b4adeab8b9 | ||
|
|
d785036e0b | ||
|
|
c206d9b680 | ||
|
|
3dabfbeae0 | ||
|
|
047267bbc8 | ||
|
|
6e16df8547 | ||
|
|
a5d3e19b4a | ||
|
|
6ef7b316cd | ||
|
|
e7184c499d | ||
|
|
f0d4ddcf3c | ||
|
|
7ccee61557 | ||
|
|
2fff29f340 | ||
|
|
b6979e7fbd | ||
|
|
ca1891944b | ||
|
|
ce824d6fad | ||
|
|
c6f8f0aefc | ||
|
|
8a67968416 | ||
|
|
61ee6b0b5d | ||
|
|
1a74e01d18 | ||
|
|
741a515f5b | ||
|
|
55e8ccab21 | ||
|
|
e1644d00c5 | ||
|
|
346aa14db5 | ||
|
|
7121f68f25 | ||
|
|
710ed63f55 | ||
|
|
3de8d14baa | ||
|
|
6de4bb1b6b | ||
|
|
6a2a71be07 | ||
|
|
260cc5dd69 | ||
|
|
12ebd7735a | ||
|
|
93b9bab932 | ||
|
|
82e4693cc5 | ||
|
|
169fa6dcdb | ||
|
|
302aec6dcc | ||
|
|
10f30f8a99 | ||
|
|
07bb7fb445 | ||
|
|
46252a9b69 | ||
|
|
8d28e7467c | ||
|
|
b894f07380 | ||
|
|
6c77207055 | ||
|
|
a6b22cfa97 | ||
|
|
d7d0ffea13 | ||
|
|
7e27e9b852 | ||
|
|
090afe30f9 | ||
|
|
b9d98a61c2 | ||
|
|
250c3aa92e | ||
|
|
a4b7018732 | ||
|
|
bf9e36d02d | ||
|
|
08708046a7 | ||
|
|
dcdf9bbff8 | ||
|
|
433bdf5063 | ||
|
|
2a0fa665fd | ||
|
|
b0130b4661 | ||
|
|
01c32c62e8 | ||
|
|
99aea7b079 | ||
|
|
2672b91ab0 | ||
|
|
4f79930c12 | ||
|
|
1635d1339c | ||
|
|
9df66eaa7d | ||
|
|
b802e9c9c3 | ||
|
|
95367eaf51 | ||
|
|
7eb4e5c0b1 | ||
|
|
f02987e7bc | ||
|
|
7d2b3e305e | ||
|
|
ca8a404c7c |
@@ -1,339 +0,0 @@
|
||||
version: 2.1
|
||||
|
||||
executors:
|
||||
golang:
|
||||
docker:
|
||||
- image: tendermintdev/docker-tendermint-build
|
||||
working_directory: /go/src/github.com/tendermint/tendermint
|
||||
environment:
|
||||
GOBIN: /tmp/bin
|
||||
release:
|
||||
machine: true
|
||||
docs:
|
||||
docker:
|
||||
- image: tendermintdev/docker-website-deployment
|
||||
environment:
|
||||
AWS_REGION: us-east-1
|
||||
|
||||
commands:
|
||||
run_test:
|
||||
parameters:
|
||||
script_path:
|
||||
type: string
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: /tmp/bin
|
||||
- restore_cache:
|
||||
name: "Restore source code cache"
|
||||
keys:
|
||||
- go-src-v1-{{ .Revision }}
|
||||
- checkout
|
||||
- restore_cache:
|
||||
name: "Restore go modules cache"
|
||||
keys:
|
||||
- go-mod-v1-{{ checksum "go.sum" }}
|
||||
- run:
|
||||
name: "Running test"
|
||||
command: |
|
||||
bash << parameters.script_path >>
|
||||
jobs:
|
||||
setup_dependencies:
|
||||
executor: golang
|
||||
steps:
|
||||
- checkout
|
||||
- restore_cache:
|
||||
name: "Restore go modules cache"
|
||||
keys:
|
||||
- go-mod-v1-{{ checksum "go.sum" }}
|
||||
- run:
|
||||
command: |
|
||||
mkdir -p /tmp/bin
|
||||
- run:
|
||||
name: Cache go modules
|
||||
command: make go-mod-cache
|
||||
- run:
|
||||
name: tools
|
||||
command: make tools
|
||||
- run:
|
||||
name: "Build binaries"
|
||||
command: make install install_abci
|
||||
- save_cache:
|
||||
name: "Save go modules cache"
|
||||
key: go-mod-v1-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- "/go/pkg/mod"
|
||||
- save_cache:
|
||||
name: "Save source code cache"
|
||||
key: go-src-v1-{{ .Revision }}
|
||||
paths:
|
||||
- ".git"
|
||||
- persist_to_workspace:
|
||||
root: "/tmp/bin"
|
||||
paths:
|
||||
- "."
|
||||
|
||||
test_persistence:
|
||||
executor: golang
|
||||
steps:
|
||||
- run_test:
|
||||
script_path: test/persist/test_failure_indices.sh
|
||||
|
||||
test_p2p:
|
||||
environment:
|
||||
GOBIN: /home/circleci/.go_workspace/bin
|
||||
GOPATH: /home/circleci/.go_workspace
|
||||
machine:
|
||||
image: circleci/classic:latest
|
||||
parameters:
|
||||
ipv:
|
||||
type: integer
|
||||
default: 4
|
||||
steps:
|
||||
- checkout
|
||||
- run: mkdir -p $GOPATH/src/github.com/tendermint
|
||||
- run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
|
||||
- run: bash test/p2p/circleci.sh << parameters.ipv >>
|
||||
- store_artifacts:
|
||||
path: /home/circleci/project/test/p2p/logs
|
||||
|
||||
deploy_docs:
|
||||
executor: docs
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
name: "Pull versions"
|
||||
command: git fetch origin v0.32 v0.33
|
||||
- run:
|
||||
name: "Build docs"
|
||||
command: make build-docs
|
||||
- run:
|
||||
name: "Sync to S3"
|
||||
command: make sync-docs
|
||||
|
||||
prepare_build:
|
||||
executor: golang
|
||||
steps:
|
||||
- restore_cache:
|
||||
name: "Restore source code cache"
|
||||
keys:
|
||||
- go-src-v1-{{ .Revision }}
|
||||
- checkout
|
||||
- run:
|
||||
name: Get next release number
|
||||
command: |
|
||||
export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
|
||||
echo "Last tag: ${LAST_TAG}"
|
||||
if [ -z "${LAST_TAG}" ]; then
|
||||
export LAST_TAG="${CIRCLE_BRANCH}"
|
||||
echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
|
||||
fi
|
||||
export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
|
||||
echo "Next tag: ${NEXT_TAG}"
|
||||
echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
|
||||
- run:
|
||||
name: Build dependencies
|
||||
command: make tools
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths:
|
||||
- "release-version.source"
|
||||
- save_cache:
|
||||
key: v2-release-deps-{{ checksum "go.sum" }}
|
||||
paths:
|
||||
- "/go/pkg/mod"
|
||||
|
||||
build_artifacts:
|
||||
executor: golang
|
||||
parallelism: 5
|
||||
steps:
|
||||
- restore_cache:
|
||||
name: "Restore source code cache"
|
||||
keys:
|
||||
- go-src-v1-{{ .Revision }}
|
||||
- checkout
|
||||
- restore_cache:
|
||||
name: "Restore release dependencies cache"
|
||||
keys:
|
||||
- v2-release-deps-{{ checksum "go.sum" }}
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: Build artifact
|
||||
command: |
|
||||
# Setting CIRCLE_TAG because we do not tag the release ourselves.
|
||||
source /tmp/workspace/release-version.source
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
if test ${CIRCLE_NODE_INDEX:-0} == 4 ;then export GOOS=linux GOARCH=arm64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
|
||||
- persist_to_workspace:
|
||||
root: build
|
||||
paths:
|
||||
- "*.zip"
|
||||
- "tendermint_linux_amd64"
|
||||
|
||||
release_artifacts:
|
||||
executor: golang
|
||||
steps:
|
||||
- restore_cache:
|
||||
name: "Restore source code cache"
|
||||
keys:
|
||||
- go-src-v1-{{ .Revision }}
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: "Deploy to GitHub"
|
||||
command: |
|
||||
# Setting CIRCLE_TAG because we do not tag the release ourselves.
|
||||
source /tmp/workspace/release-version.source
|
||||
echo "---"
|
||||
ls -la /tmp/workspace/*.zip
|
||||
echo "---"
|
||||
python -u scripts/release_management/sha-files.py
|
||||
echo "---"
|
||||
cat /tmp/workspace/SHA256SUMS
|
||||
echo "---"
|
||||
export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
|
||||
echo "Release ID: ${RELEASE_ID}"
|
||||
#Todo: Parallelize uploads
|
||||
export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
export GOOS=linux GOARCH=arm64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
|
||||
python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
|
||||
python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"
|
||||
release_docker:
|
||||
machine:
|
||||
image: ubuntu-1604:201903-01
|
||||
steps:
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- run:
|
||||
name: "Deploy to Docker Hub"
|
||||
command: |
|
||||
# Setting CIRCLE_TAG because we do not tag the release ourselves.
|
||||
source /tmp/workspace/release-version.source
|
||||
cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
|
||||
docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
|
||||
docker login -u "${DOCKERHUB_USER}" --password-stdin \<<< "${DOCKERHUB_PASS}"
|
||||
docker push "tendermint/tendermint"
|
||||
docker logout
|
||||
reproducible_builds:
|
||||
executor: golang
|
||||
steps:
|
||||
- attach_workspace:
|
||||
at: /tmp/workspace
|
||||
- checkout
|
||||
- setup_remote_docker:
|
||||
docker_layer_caching: true
|
||||
- run:
|
||||
name: Build tendermint
|
||||
no_output_timeout: 20m
|
||||
command: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y ruby
|
||||
bash -x ./scripts/gitian-build.sh all
|
||||
for os in darwin linux windows; do
|
||||
cp gitian-build-${os}/result/tendermint-${os}-res.yml .
|
||||
cp gitian-build-${os}/build/out/tendermint-*.tar.gz .
|
||||
rm -rf gitian-build-${os}/
|
||||
done
|
||||
- store_artifacts:
|
||||
path: /go/src/github.com/tendermint/tendermint/tendermint-darwin-res.yml
|
||||
- store_artifacts:
|
||||
path: /go/src/github.com/tendermint/tendermint/tendermint-linux-res.yml
|
||||
- store_artifacts:
|
||||
path: /go/src/github.com/tendermint/tendermint/tendermint-windows-res.yml
|
||||
- store_artifacts:
|
||||
path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz
|
||||
|
||||
# # Test RPC implementation against the swagger documented specs
|
||||
# contract_tests:
|
||||
# working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
|
||||
# machine:
|
||||
# image: circleci/classic:latest
|
||||
# environment:
|
||||
# GOBIN: /home/circleci/.go_workspace/bin
|
||||
# GOPATH: /home/circleci/.go_workspace/
|
||||
# GOOS: linux
|
||||
# GOARCH: amd64
|
||||
# parallelism: 1
|
||||
# steps:
|
||||
# - checkout
|
||||
# - run:
|
||||
# name: Test RPC endpoints against swagger documentation
|
||||
# command: |
|
||||
# set -x
|
||||
# export PATH=~/.local/bin:$PATH
|
||||
# # install node and dredd
|
||||
# ./scripts/get_nodejs.sh
|
||||
# # build the binaries with a proper version of Go
|
||||
# docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks
|
||||
# # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use
|
||||
# go get github.com/snikch/goodman/cmd/goodman
|
||||
# make contract-tests
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
test-suite:
|
||||
jobs:
|
||||
- deploy_docs:
|
||||
context: tendermint-docs
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
tags:
|
||||
only:
|
||||
- /^v.*/
|
||||
- deploy_docs:
|
||||
context: tendermint-docs-staging
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- docs-staging
|
||||
- setup_dependencies
|
||||
- test_persistence:
|
||||
requires:
|
||||
- setup_dependencies
|
||||
- test_p2p
|
||||
- test_p2p:
|
||||
name: test_p2p_ipv6
|
||||
ipv: 6
|
||||
- reproducible_builds:
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- master
|
||||
- /v[0-9]+\.[0-9]+/
|
||||
# - contract_tests:
|
||||
# requires:
|
||||
# - setup_dependencies
|
||||
|
||||
release:
|
||||
jobs:
|
||||
- prepare_build
|
||||
- build_artifacts:
|
||||
requires:
|
||||
- prepare_build
|
||||
- release_artifacts:
|
||||
requires:
|
||||
- prepare_build
|
||||
- build_artifacts
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /v[0-9]+\.[0-9]+/
|
||||
- release_docker:
|
||||
requires:
|
||||
- prepare_build
|
||||
- build_artifacts
|
||||
filters:
|
||||
branches:
|
||||
only:
|
||||
- /v[0-9]+\.[0-9]+/
|
||||
- master
|
||||
5
.dockerignore
Normal file
5
.dockerignore
Normal file
@@ -0,0 +1,5 @@
|
||||
build
|
||||
test/e2e/build
|
||||
test/e2e/networks
|
||||
test/logs
|
||||
test/p2p/data
|
||||
18
.github/CODEOWNERS
vendored
18
.github/CODEOWNERS
vendored
@@ -7,21 +7,19 @@
|
||||
# global owners are only requested if there isn't a more specific
|
||||
# codeowner specified below. For this reason, the global codeowners
|
||||
# are often repeated in package-level definitions.
|
||||
* @ebuchman @erikgrinaker @melekes @tessr
|
||||
* @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
|
||||
|
||||
# Overrides for tooling packages
|
||||
.circleci/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
|
||||
.github/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
|
||||
DOCKER/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
|
||||
.github/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
|
||||
DOCKER/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
|
||||
|
||||
# Overrides for core Tendermint packages
|
||||
abci/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
|
||||
evidence/ @cmwaters @ebuchman @erikgrinaker @melekes @tessr
|
||||
light/ @cmwaters @melekes @ebuchman
|
||||
abci/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
|
||||
evidence/ @cmwaters @ebuchman @melekes @tessr
|
||||
light/ @cmwaters @melekes @ebuchman @tessr
|
||||
|
||||
# Overrides for docs
|
||||
*.md @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
|
||||
docs/ @marbar3778 @ebuchman @erikgrinaker @melekes @tessr
|
||||
|
||||
*.md @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
|
||||
docs/ @marbar3778 @alexanderbez @ebuchman @erikgrinaker @melekes @tessr
|
||||
|
||||
|
||||
|
||||
6
codecov.yml → .github/codecov.yml
vendored
6
codecov.yml → .github/codecov.yml
vendored
@@ -2,9 +2,6 @@ coverage:
|
||||
precision: 2
|
||||
round: down
|
||||
range: "70...100"
|
||||
notify:
|
||||
after_n_builds: 4
|
||||
|
||||
status:
|
||||
project:
|
||||
default:
|
||||
@@ -12,6 +9,9 @@ coverage:
|
||||
patch: on
|
||||
changes: off
|
||||
|
||||
github_checks:
|
||||
annotations: false
|
||||
|
||||
comment:
|
||||
layout: "diff, files"
|
||||
behavior: default
|
||||
0
.mergify.yml → .github/mergify.yml
vendored
0
.mergify.yml → .github/mergify.yml
vendored
161
.github/workflows/coverage.yml
vendored
161
.github/workflows/coverage.yml
vendored
@@ -14,125 +14,114 @@ jobs:
|
||||
- name: Create a file with all the pkgs
|
||||
run: go list ./... > pkgs.txt
|
||||
- name: Split pkgs into 4 files
|
||||
run: split -n l/4 --additional-suffix=.txt ./pkgs.txt
|
||||
run: split -d -n l/4 pkgs.txt pkgs.txt.part.
|
||||
# cache multiple
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-aa"
|
||||
path: ./xaa.txt
|
||||
name: "${{ github.sha }}-00"
|
||||
path: ./pkgs.txt.part.00
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-ab"
|
||||
path: ./xab.txt
|
||||
name: "${{ github.sha }}-01"
|
||||
path: ./pkgs.txt.part.01
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-ac"
|
||||
path: ./xac.txt
|
||||
name: "${{ github.sha }}-02"
|
||||
path: ./pkgs.txt.part.02
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-ad"
|
||||
path: ./xad.txt
|
||||
name: "${{ github.sha }}-03"
|
||||
path: ./pkgs.txt.part.03
|
||||
|
||||
test-coverage-part-1:
|
||||
build-linux:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
needs: split-test-files
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
goarch: ["arm", "amd64"]
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
SET_ENV_NAME_INSERTIONS: 1
|
||||
SET_ENV_NAME_LINES: 1
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-aa"
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- name: test & coverage report creation
|
||||
run: |
|
||||
cat xaa.txt | xargs go test -mod=readonly -timeout 8m -race -coverprofile=coverage.txt -covermode=atomic
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: codecov/codecov-action@v1.0.13
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
test-coverage-part-2:
|
||||
tests:
|
||||
runs-on: ubuntu-latest
|
||||
needs: split-test-files
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
part: ["00", "01", "02", "03"]
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
SET_ENV_NAME_INSERTIONS: 1
|
||||
SET_ENV_NAME_LINES: 1
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-ab"
|
||||
if: "env.GIT_DIFF != ''"
|
||||
name: "${{ github.sha }}-${{ matrix.part }}"
|
||||
if: env.GIT_DIFF
|
||||
- name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.15
|
||||
- name: test & coverage report creation
|
||||
run: |
|
||||
cat xab.txt | xargs go test -mod=readonly -timeout 5m -race -coverprofile=coverage.txt -covermode=atomic
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: codecov/codecov-action@v1.0.13
|
||||
cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/upload-artifact@v2
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
if: "env.GIT_DIFF != ''"
|
||||
name: "${{ github.sha }}-${{ matrix.part }}-coverage"
|
||||
path: ./${{ matrix.part }}profile.out
|
||||
|
||||
test-coverage-part-3:
|
||||
upload-coverage-report:
|
||||
runs-on: ubuntu-latest
|
||||
needs: split-test-files
|
||||
needs: tests
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
SET_ENV_NAME_INSERTIONS: 1
|
||||
SET_ENV_NAME_LINES: 1
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-ac"
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- name: test & coverage report creation
|
||||
run: |
|
||||
cat xac.txt | xargs go test -mod=readonly -timeout 10m -race -coverprofile=coverage.txt -covermode=atomic
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: codecov/codecov-action@v1.0.13
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
test-coverage-part-4:
|
||||
runs-on: ubuntu-latest
|
||||
needs: split-test-files
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
SET_ENV_NAME_INSERTIONS: 1
|
||||
SET_ENV_NAME_LINES: 1
|
||||
name: "${{ github.sha }}-00-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-ad"
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- name: test & coverage report creation
|
||||
run: |
|
||||
cat xad.txt | xargs go test -mod=readonly -timeout 5m -race -coverprofile=coverage.txt -covermode=atomic
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: codecov/codecov-action@v1.0.13
|
||||
name: "${{ github.sha }}-01-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-02-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/download-artifact@v2
|
||||
with:
|
||||
name: "${{ github.sha }}-03-coverage"
|
||||
if: env.GIT_DIFF
|
||||
- run: |
|
||||
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
|
||||
if: env.GIT_DIFF
|
||||
- uses: codecov/codecov-action@v1.2.1
|
||||
with:
|
||||
file: ./coverage.txt
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
|
||||
59
.github/workflows/docker.yml
vendored
Normal file
59
.github/workflows/docker.yml
vendored
Normal file
@@ -0,0 +1,59 @@
|
||||
name: Build & Push
|
||||
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
|
||||
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
|
||||
on:
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
- "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- name: Prepare
|
||||
id: prep
|
||||
run: |
|
||||
DOCKER_IMAGE=tendermint/tendermint
|
||||
VERSION=noop
|
||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
||||
VERSION=${GITHUB_REF#refs/tags/}
|
||||
elif [[ $GITHUB_REF == refs/heads/* ]]; then
|
||||
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
|
||||
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
|
||||
VERSION=latest
|
||||
fi
|
||||
fi
|
||||
TAGS="${DOCKER_IMAGE}:${VERSION}"
|
||||
if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
|
||||
TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
|
||||
fi
|
||||
echo ::set-output name=tags::${TAGS}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@master
|
||||
with:
|
||||
platforms: all
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to DockerHub
|
||||
if: ${{ github.event_name != 'pull_request' }}
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Publish to Docker Hub
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: ./DOCKER/Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
tags: ${{ steps.prep.outputs.tags }}
|
||||
32
.github/workflows/docs.yml
vendored
Normal file
32
.github/workflows/docs.yml
vendored
Normal file
@@ -0,0 +1,32 @@
|
||||
name: Documentation
|
||||
# This job builds and deploys documentation to github pages.
|
||||
# It runs on every push to master, and can be manually triggered.
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
|
||||
jobs:
|
||||
build-and-deploy:
|
||||
runs-on: ubuntu-latest
|
||||
container:
|
||||
image: tendermintdev/docker-website-deployment
|
||||
steps:
|
||||
- name: Checkout 🛎️
|
||||
uses: actions/checkout@v2.3.1
|
||||
with:
|
||||
persist-credentials: false
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Install and Build 🔧
|
||||
run: |
|
||||
apk add rsync
|
||||
make build-docs
|
||||
|
||||
- name: Deploy 🚀
|
||||
uses: JamesIves/github-pages-deploy-action@3.7.1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
BRANCH: gh-pages
|
||||
FOLDER: ~/output
|
||||
57
.github/workflows/e2e-nightly.yml
vendored
Normal file
57
.github/workflows/e2e-nightly.yml
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
# Runs randomly generated E2E testnets nightly.
|
||||
name: e2e-nightly
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
schedule:
|
||||
- cron: '0 2 * * *'
|
||||
|
||||
jobs:
|
||||
e2e-nightly-test:
|
||||
# Run parallel jobs for the listed testnet groups (must match the
|
||||
# ./build/generator -g flag)
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
group: ['00', '01', '02', '03']
|
||||
# todo: expand to multiple versions after 0.35 release
|
||||
branch: ['master', 'v0.34.x']
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.15'
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
with:
|
||||
ref: ${{ matrix.branch}}
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker generator runner
|
||||
|
||||
- name: Generate testnets
|
||||
working-directory: test/e2e
|
||||
# When changing -g, also change the matrix groups above
|
||||
run: ./build/generator -g 4 -d networks/nightly
|
||||
|
||||
- name: Run testnets in group ${{ matrix.group }}
|
||||
working-directory: test/e2e
|
||||
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
|
||||
|
||||
e2e-nightly-fail:
|
||||
needs: e2e-nightly-test
|
||||
if: ${{ failure() }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack on failure
|
||||
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly E2E Tests
|
||||
SLACK_ICON_EMOJI: ':skull:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Nightly E2E tests failed
|
||||
SLACK_FOOTER: ''
|
||||
42
.github/workflows/e2e.yml
vendored
Normal file
42
.github/workflows/e2e.yml
vendored
Normal file
@@ -0,0 +1,42 @@
|
||||
name: e2e
|
||||
# Runs the CI end-to-end test network on all pushes to master or release branches
|
||||
# and every pull request, but only if any Go files have been changed.
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
pull_request:
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
e2e-test:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.15'
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
|
||||
- name: Build
|
||||
working-directory: test/e2e
|
||||
# Run two make jobs in parallel, since we can't run steps in parallel.
|
||||
run: make -j2 docker runner
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
- name: Run CI testnet
|
||||
working-directory: test/e2e
|
||||
run: ./build/runner -f networks/ci.toml
|
||||
if: "env.GIT_DIFF != ''"
|
||||
|
||||
- name: Emit logs on failure
|
||||
if: ${{ failure() }}
|
||||
working-directory: test/e2e
|
||||
run: ./build/runner -f networks/ci.toml logs
|
||||
69
.github/workflows/fuzz-nightly.yml
vendored
Normal file
69
.github/workflows/fuzz-nightly.yml
vendored
Normal file
@@ -0,0 +1,69 @@
|
||||
# Runs fuzzing nightly.
|
||||
name: fuzz-nightly
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
schedule:
|
||||
- cron: '0 3 * * *'
|
||||
|
||||
jobs:
|
||||
fuzz-nightly-test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.15'
|
||||
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install go-fuzz
|
||||
working-directory: test/fuzz
|
||||
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
|
||||
|
||||
- name: Fuzz mempool
|
||||
working-directory: test/fuzz
|
||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
|
||||
continue-on-error: true
|
||||
|
||||
- name: Fuzz p2p-addrbook
|
||||
working-directory: test/fuzz
|
||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-addrbook
|
||||
continue-on-error: true
|
||||
|
||||
- name: Fuzz p2p-pex
|
||||
working-directory: test/fuzz
|
||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-pex
|
||||
continue-on-error: true
|
||||
|
||||
- name: Fuzz p2p-sc
|
||||
working-directory: test/fuzz
|
||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-p2p-sc
|
||||
continue-on-error: true
|
||||
|
||||
- name: Fuzz p2p-rpc-server
|
||||
working-directory: test/fuzz
|
||||
run: timeout -s SIGINT --preserve-status 10m make fuzz-rpc-server
|
||||
continue-on-error: true
|
||||
|
||||
- name: Set crashers count
|
||||
working-directory: test/fuzz
|
||||
run: echo "::set-output name=crashers-count::$(find . -type d -name "crashers" | xargs -I % sh -c 'ls % | wc -l' | awk '{total += $1} END {print total}')"
|
||||
id: set-crashers-count
|
||||
|
||||
outputs:
|
||||
crashers_count: ${{ steps.set-crashers-count.outputs.crashers-count }}
|
||||
|
||||
fuzz-nightly-fail:
|
||||
needs: fuzz-nightly-test
|
||||
if: ${{ needs.set-crashers-count.outputs.crashers-count != 0 }}
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Notify Slack if any crashers
|
||||
uses: rtCamp/action-slack-notify@ae4223259071871559b6e9d08b24a63d71b3f0c0
|
||||
env:
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
|
||||
SLACK_CHANNEL: tendermint-internal
|
||||
SLACK_USERNAME: Nightly Fuzz Tests
|
||||
SLACK_ICON_EMOJI: ':firecracker:'
|
||||
SLACK_COLOR: danger
|
||||
SLACK_MESSAGE: Crashers found in Nightly Fuzz tests
|
||||
SLACK_FOOTER: ''
|
||||
2
.github/workflows/linkchecker.yml
vendored
2
.github/workflows/linkchecker.yml
vendored
@@ -7,6 +7,6 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.7
|
||||
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
|
||||
with:
|
||||
folder-path: "docs"
|
||||
|
||||
18
.github/workflows/lint.yaml
vendored
18
.github/workflows/lint.yaml
vendored
@@ -11,19 +11,19 @@ jobs:
|
||||
golangci:
|
||||
name: golangci-lint
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 4
|
||||
timeout-minutes: 8
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
- uses: golangci/golangci-lint-action@v2.2.0
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: golangci/golangci-lint-action@v2.3.0
|
||||
with:
|
||||
# Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
|
||||
version: v1.30
|
||||
version: v1.31
|
||||
args: --timeout 10m
|
||||
github-token: ${{ secrets.github_token }}
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
|
||||
1
.github/workflows/linter.yml
vendored
1
.github/workflows/linter.yml
vendored
@@ -27,6 +27,5 @@ jobs:
|
||||
DEFAULT_BRANCH: master
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
VALIDATE_MD: true
|
||||
MARKDOWN_CONFIG_FILE: .markdownlint.yml
|
||||
VALIDATE_OPAENAPI: true
|
||||
VALIDATE_YAML: true
|
||||
|
||||
28
.github/workflows/net.yml
vendored
28
.github/workflows/net.yml
vendored
@@ -1,28 +0,0 @@
|
||||
name: Net
|
||||
# Net creates a 4 node test network with docker-compose
|
||||
# This workflow is run on every pull request, if a *{.go, .mod, .sum} file has been modified, and push to master and release/** branches
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "**.go"
|
||||
- "**.mod"
|
||||
- "**.sum"
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
- release/**
|
||||
|
||||
jobs:
|
||||
net-short:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: 10 Blocks
|
||||
run: |
|
||||
set -x
|
||||
docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
|
||||
make localnet-start &
|
||||
./scripts/localnet-blocks-test.sh 40 5 10 localhost
|
||||
|
||||
# Decide if we want to run longer lived testnets
|
||||
51
.github/workflows/proto-docker.yml
vendored
Normal file
51
.github/workflows/proto-docker.yml
vendored
Normal file
@@ -0,0 +1,51 @@
|
||||
name: Build & Push TM Proto Builder
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
- "tools/proto/*"
|
||||
push:
|
||||
branches:
|
||||
- master
|
||||
paths:
|
||||
- "tools/proto/*"
|
||||
schedule:
|
||||
# run this job once a month to recieve any go or buf updates
|
||||
- cron: "* * 1 * *"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@master
|
||||
- name: Prepare
|
||||
id: prep
|
||||
run: |
|
||||
DOCKER_IMAGE=tendermintdev/docker-build-proto
|
||||
VERSION=noop
|
||||
if [[ $GITHUB_REF == refs/tags/* ]]; then
|
||||
VERSION=${GITHUB_REF#refs/tags/}
|
||||
elif [[ $GITHUB_REF == refs/heads/* ]]; then
|
||||
VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
|
||||
if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
|
||||
VERSION=latest
|
||||
fi
|
||||
fi
|
||||
TAGS="${DOCKER_IMAGE}:${VERSION}"
|
||||
echo ::set-output name=tags::${TAGS}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
|
||||
- name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Publish to Docker Hub
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
context: ./tools/proto
|
||||
file: ./tools/proto/Dockerfile
|
||||
push: ${{ github.event_name != 'pull_request' }}
|
||||
tags: ${{ steps.prep.outputs.tags }}
|
||||
1
.github/workflows/proto.yml
vendored
1
.github/workflows/proto.yml
vendored
@@ -2,6 +2,7 @@ name: Protobuf
|
||||
# Protobuf runs buf (https://buf.build/) lint and check-breakage
|
||||
# This workflow is only run when a .proto file has been modified
|
||||
on:
|
||||
workflow_dispatch: # allow running workflow manually
|
||||
pull_request:
|
||||
paths:
|
||||
- "**.proto"
|
||||
|
||||
40
.github/workflows/release.yml
vendored
Normal file
40
.github/workflows/release.yml
vendored
Normal file
@@ -0,0 +1,40 @@
|
||||
name: "Release"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- "RC[0-9]/**"
|
||||
tags:
|
||||
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
|
||||
|
||||
jobs:
|
||||
goreleaser:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: '1.15'
|
||||
|
||||
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
|
||||
- name: Build
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
if: ${{ github.event_name == 'pull_request' }}
|
||||
with:
|
||||
version: latest
|
||||
args: build --skip-validate # skip validate skips initial sanity checks in order to be able to fully run
|
||||
|
||||
- name: Release
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
with:
|
||||
version: latest
|
||||
args: release --rm-dist --release-notes=../release_notes.md
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
124
.github/workflows/tests.yml
vendored
124
.github/workflows/tests.yml
vendored
@@ -23,132 +23,124 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
SET_ENV_NAME_INSERTIONS: 1
|
||||
SET_ENV_NAME_LINES: 1
|
||||
- name: Set GOBIN
|
||||
run: |
|
||||
echo "::add-path::$(go env GOPATH)/bin"
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- name: install
|
||||
run: make install install_abci
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: actions/cache@v2.1.1
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
# Cache binaries for use by other jobs
|
||||
- uses: actions/cache@v2.1.1
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_abci_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: Build
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
SET_ENV_NAME_INSERTIONS: 1
|
||||
SET_ENV_NAME_LINES: 1
|
||||
- name: Set GOBIN
|
||||
run: |
|
||||
echo "::add-path::$(go env GOPATH)/bin"
|
||||
- uses: actions/cache@v2.1.1
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: actions/cache@v2.1.1
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
- name: test_abci_apps
|
||||
run: abci/tests/test_app/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_abci_cli:
|
||||
runs-on: ubuntu-latest
|
||||
needs: Build
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
SET_ENV_NAME_INSERTIONS: 1
|
||||
SET_ENV_NAME_LINES: 1
|
||||
- name: Set GOBIN
|
||||
run: |
|
||||
echo "::add-path::$(go env GOPATH)/bin"
|
||||
- uses: actions/cache@v2.1.1
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: actions/cache@v2.1.1
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
- run: abci/tests/test_cli/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
|
||||
test_apps:
|
||||
runs-on: ubuntu-latest
|
||||
needs: Build
|
||||
needs: build
|
||||
timeout-minutes: 5
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v3
|
||||
- uses: actions/setup-go@v2
|
||||
with:
|
||||
SUFFIX_FILTER: |
|
||||
.go
|
||||
.mod
|
||||
.sum
|
||||
SET_ENV_NAME_INSERTIONS: 1
|
||||
SET_ENV_NAME_LINES: 1
|
||||
- name: Set GOBIN
|
||||
run: |
|
||||
echo "::add-path::$(go env GOPATH)/bin"
|
||||
- uses: actions/cache@v2.1.1
|
||||
go-version: "1.15"
|
||||
- uses: actions/checkout@v2
|
||||
- uses: technote-space/get-diff-action@v4
|
||||
with:
|
||||
PATTERNS: |
|
||||
**/**.go
|
||||
go.mod
|
||||
go.sum
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/pkg/mod
|
||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-go-
|
||||
if: "env.GIT_DIFF != ''"
|
||||
- uses: actions/cache@v2.1.1
|
||||
if: env.GIT_DIFF
|
||||
- uses: actions/cache@v2.1.3
|
||||
with:
|
||||
path: ~/go/bin
|
||||
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
- name: test_apps
|
||||
run: test/app/test.sh
|
||||
shell: bash
|
||||
if: "env.GIT_DIFF != ''"
|
||||
if: env.GIT_DIFF
|
||||
|
||||
65
.gitignore
vendored
65
.gitignore
vendored
@@ -1,46 +1,47 @@
|
||||
*.swp
|
||||
*.swo
|
||||
.bak
|
||||
*.bak
|
||||
*.iml
|
||||
*.log
|
||||
*.swo
|
||||
*.swp
|
||||
*/.glide
|
||||
*/vendor
|
||||
.DS_Store
|
||||
build/*
|
||||
rpc/test/.tendermint
|
||||
.tendermint
|
||||
remote_dump
|
||||
.bak
|
||||
.idea/
|
||||
.revision
|
||||
vendor
|
||||
.tendermint
|
||||
.tendermint-lite
|
||||
.terraform
|
||||
.vagrant
|
||||
test/p2p/data/
|
||||
test/logs
|
||||
.vendor-new/
|
||||
.vscode/
|
||||
abci-cli
|
||||
addrbook.json
|
||||
artifacts/*
|
||||
build/*
|
||||
coverage.txt
|
||||
docs/.vuepress/dist
|
||||
docs/_build
|
||||
docs/dist
|
||||
docs/.vuepress/dist
|
||||
*.log
|
||||
abci-cli
|
||||
docs/node_modules/
|
||||
index.html.md
|
||||
|
||||
scripts/wal2json/wal2json
|
||||
scripts/cutWALUntil/cutWALUntil
|
||||
|
||||
.idea/
|
||||
*.iml
|
||||
|
||||
.vscode/
|
||||
|
||||
libs/pubsub/query/fuzz_test/output
|
||||
profile\.out
|
||||
remote_dump
|
||||
rpc/test/.tendermint
|
||||
scripts/cutWALUntil/cutWALUntil
|
||||
scripts/wal2json/wal2json
|
||||
shunit2
|
||||
|
||||
.tendermint-lite
|
||||
addrbook.json
|
||||
|
||||
*/vendor
|
||||
.vendor-new/
|
||||
*/.glide
|
||||
.terraform
|
||||
terraform.tfstate
|
||||
terraform.tfstate.backup
|
||||
terraform.tfstate.d
|
||||
|
||||
profile\.out
|
||||
test/e2e/build
|
||||
test/e2e/networks/*/
|
||||
test/logs
|
||||
test/maverick/maverick
|
||||
test/p2p/data/
|
||||
vendor
|
||||
test/fuzz/**/corpus
|
||||
test/fuzz/**/crashers
|
||||
test/fuzz/**/suppressions
|
||||
test/fuzz/**/*.zip
|
||||
|
||||
36
.goreleaser.yml
Normal file
36
.goreleaser.yml
Normal file
@@ -0,0 +1,36 @@
|
||||
project_name: Tendermint
|
||||
|
||||
env:
|
||||
# Require use of Go modules.
|
||||
- GO111MODULE=on
|
||||
|
||||
builds:
|
||||
- id: "tendermint"
|
||||
main: ./cmd/tendermint/main.go
|
||||
ldflags:
|
||||
- -s -w -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Version }}
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- darwin
|
||||
- linux
|
||||
- windows
|
||||
goarch:
|
||||
- amd64
|
||||
- arm
|
||||
- arm64
|
||||
|
||||
checksum:
|
||||
name_template: SHA256SUMS-{{.Version}}.txt
|
||||
algorithm: sha256
|
||||
|
||||
release:
|
||||
name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"
|
||||
|
||||
archives:
|
||||
- files:
|
||||
- LICENSE
|
||||
- README.md
|
||||
- UPGRADING.md
|
||||
- SECURITY.md
|
||||
- CHANGELOG.md
|
||||
@@ -1,5 +1,6 @@
|
||||
docs/node_modules
|
||||
CHANGELOG.md
|
||||
docs/architecture/*
|
||||
crypto/secp256k1/**
|
||||
scripts/*
|
||||
.github
|
||||
|
||||
8
.vscode/settings.json
vendored
8
.vscode/settings.json
vendored
@@ -1,8 +0,0 @@
|
||||
{
|
||||
"protoc": {
|
||||
"options": [
|
||||
"--proto_path=${workspaceRoot}/proto",
|
||||
"--proto_path=${workspaceRoot}/third_party/proto"
|
||||
]
|
||||
}
|
||||
}
|
||||
335
CHANGELOG.md
335
CHANGELOG.md
@@ -1,246 +1,261 @@
|
||||
# Changelog
|
||||
|
||||
## v0.34.0-rc4
|
||||
## v0.34.3
|
||||
|
||||
*September 24, 2020*
|
||||
*January 19, 2021*
|
||||
|
||||
This release includes a fix for a high-severity security vulnerability.
|
||||
More information on this vulnerability will be released on January 26, 2021
|
||||
and this changelog will be updated.
|
||||
|
||||
It also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
|
||||
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
|
||||
|
||||
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [evidence] [N/A] Use correct source of evidence time (@cmwaters)
|
||||
- [proto] [\#5886](https://github.com/tendermint/tendermint/pull/5889) Bump gogoproto to 1.3.2 (@marbar3778)
|
||||
|
||||
## v0.34.2
|
||||
|
||||
*January 12, 2021*
|
||||
|
||||
This release fixes a substantial bug in evidence handling where evidence could
|
||||
sometimes be broadcast before the block containing that evidence was fully committed,
|
||||
resulting in some nodes panicking when trying to verify said evidence.
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- Go API
|
||||
- [libs/os] [\#5871](https://github.com/tendermint/tendermint/issues/5871) `EnsureDir` now propagates IO errors and checks the file type (@erikgrinaker)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [evidence] [\#5890](https://github.com/tendermint/tendermint/pull/5890) Add a buffer to evidence from consensus to avoid broadcasting and proposing evidence before the
|
||||
height of such an evidence has finished (@cmwaters)
|
||||
- [statesync] [\#5889](https://github.com/tendermint/tendermint/issues/5889) Set `LastHeightConsensusParamsChanged` when bootstrapping Tendermint state (@cmwaters)
|
||||
|
||||
## v0.34.1
|
||||
|
||||
*January 6, 2021*
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
@p4u from vocdoni.io reported that the mempool might behave incorrectly under a
|
||||
high load. The consequences can range from pauses between blocks to the peers
|
||||
disconnecting from this node. As a temporary remedy (until the mempool package
|
||||
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
|
||||
one by one without batching.
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [config] [\#5315](https://github.com/tendermint/tendermint/issues/5315) Rename `prof_laddr` to `pprof_laddr` and move it to `rpc` section (@melekes)
|
||||
- [rpc] [\#5315](https://github.com/tendermint/tendermint/issues/5315) Remove `/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and `/unsafe_write_heap_profile`. Please use pprof functionality instead (@melekes)
|
||||
- [rpc/client, rpc/jsonrpc/client] [\#5347](https://github.com/tendermint/tendermint/issues/5347) All client methods now accept `context.Context` as 1st param (@melekes)
|
||||
|
||||
- Apps
|
||||
- [abci] [\#5324](https://github.com/tendermint/tendermint/pull/5324) abci evidence type is an enum with two types of possible evidence (@cmwaters)
|
||||
|
||||
- P2P Protocol
|
||||
- [mempool] [\#5321](https://github.com/tendermint/tendermint/issues/5321) Batch transactions when broadcasting them to peers (@melekes) `MaxBatchBytes` new config setting defines the max size of one batch.
|
||||
- [cli] [\#5786](https://github.com/tendermint/tendermint/issues/5786) deprecate snake_case commands for hyphen-case (@cmwaters)
|
||||
|
||||
- Go API
|
||||
- [evidence] [\#5317](https://github.com/tendermint/tendermint/issues/5317) Remove ConflictingHeaders evidence type & CompositeEvidence Interface. (@marbar3778)
|
||||
- [evidence] [\#5318](https://github.com/tendermint/tendermint/issues/5318) Remove LunaticValidator evidence type. (@marbar3778)
|
||||
- [evidence] [\#5319](https://github.com/tendermint/tendermint/issues/5319) Remove Amnesia & potentialAmnesia evidence types and removed POLC. (@marbar3778)
|
||||
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and change evidence interface (@cmwaters)
|
||||
- [params] [\#5319](https://github.com/tendermint/tendermint/issues/5319) Remove `ProofofTrialPeriod` from evidence params (@marbar3778)
|
||||
- [crypto/secp256k1] [\#5280](https://github.com/tendermint/tendermint/issues/5280) `secp256k1` has been removed from the Tendermint repo. (@marbar3778)
|
||||
- [light] [\#5347](https://github.com/tendermint/tendermint/issues/5347) `NewClient`, `NewHTTPClient`, `VerifyHeader` and `VerifyLightBlockAtHeight` now accept `context.Context` as 1st param (@melekes)
|
||||
- [state] [\#5348](https://github.com/tendermint/tendermint/issues/5348) Define an Interface for the state store. (@marbar3778)
|
||||
|
||||
### FEATURES
|
||||
|
||||
- [privval] [\#5239](https://github.com/tendermint/tendermint/issues/5239) Add `chainID` to requests from client. (@marbar3778)
|
||||
- [config] [\#5147](https://github.com/tendermint/tendermint/issues/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md)
|
||||
- [light] [\#5298](https://github.com/tendermint/tendermint/pull/5298) Morph validator set and signed header into light block (@cmwaters)
|
||||
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle (@cmwaters)
|
||||
- [libs/protoio] [\#5868](https://github.com/tendermint/tendermint/issues/5868) Return number of bytes read in `Reader.ReadMsg()` (@erikgrinaker)
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [blockchain] [\#5278](https://github.com/tendermint/tendermint/issues/5278) Verify only +2/3 of the signatures in a block when fast syncing. (@marbar3778)
|
||||
- [rpc] [\#5293](https://github.com/tendermint/tendermint/issues/5293) `/dial_peers` has added `private` and `unconditional` as parameters. (@marbar3778)
|
||||
- [types] [\#5340](https://github.com/tendermint/tendermint/issues/5340) Add check in `Header.ValidateBasic()` for block protocol version (@marbar3778)
|
||||
- [statesync] [\#5399](https://github.com/tendermint/tendermint/issues/5399) Add `discovery_time` configuration setting, and reduce default to 15s. (@erikgrinaker)
|
||||
- [mempool] [\#5813](https://github.com/tendermint/tendermint/issues/5813) Add `keep-invalid-txs-in-cache` config option. When set to true, mempool will keep invalid transactions in the cache (@p4u)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [blockchain] [\#5249](https://github.com/tendermint/tendermint/issues/5249) Fix fast sync halt with initial height > 1 (@erikgrinaker)
|
||||
- [statesync] [\#5302](https://github.com/tendermint/tendermint/issues/5302) Fix genesis state propagation to state sync routine (@erikgrinaker)
|
||||
- [statesync] [\#5320](https://github.com/tendermint/tendermint/issues/5320) Broadcast snapshot request to all pre-connected peers on start (@erikgrinaker)
|
||||
- [consensus] [\#5329](https://github.com/tendermint/tendermint/issues/5329) Fix wrong proposer schedule for validators returned by `InitChain` (@erikgrinaker)
|
||||
- [store] [\#5382](https://github.com/tendermint/tendermint/issues/5382) Fix race conditions when loading/saving/pruning blocks (@erikgrinaker)
|
||||
- [light] [\#5307](https://github.com/tendermint/tendermint/pull/5307) Persist correct proposer priority in light client validator sets (@cmwaters)
|
||||
- [docker] [\#5385](https://github.com/tendermint/tendermint/issues/5385) Fix incorrect `time_iota_ms` default setting causing block timestamp drift (@erikgrinaker)
|
||||
- [abci] [\#5395](https://github.com/tendermint/tendermint/issues/5395) Fix socket client error for state sync responses (@erikgrinaker)
|
||||
- [crypto] [\#5707](https://github.com/tendermint/tendermint/issues/5707) Fix infinite recursion in string formatting of Secp256k1 keys (@erikgrinaker)
|
||||
- [mempool] [\#5800](https://github.com/tendermint/tendermint/issues/5800) Disable `max-batch-bytes` (@melekes)
|
||||
- [p2p] [\#5868](https://github.com/tendermint/tendermint/issues/5868) Fix inbound traffic statistics and rate limiting in `MConnection` (@erikgrinaker)
|
||||
|
||||
## v0.34.0
|
||||
|
||||
## v0.34.0-rc3
|
||||
*November 19, 2020*
|
||||
|
||||
*August 13, 2020*
|
||||
|
||||
Special thanks to external contributors on this release: @SadPencil
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
|
||||
- Blockchain Protocol
|
||||
- [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
|
||||
|
||||
- Go API
|
||||
- [evidence] [\#5181](https://github.com/tendermint/tendermint/pull/5181) Phantom validator evidence was removed (@cmwaters)
|
||||
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, following RFC6962 (@erikgrinaker)
|
||||
- [crypto] [\#5214](https://github.com/tendermint/tendermint/issues/5214) Change `GenPrivKeySecp256k1` to `GenPrivKeyFromSecret` to be consistent with other keys (@marbar3778)
|
||||
- [state] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `State.InitialHeight` field to record initial block height, must be `1` (not `0`) to start from 1 (@erikgrinaker)
|
||||
- [state] [\#5231](https://github.com/tendermint/tendermint/issues/5231) `LoadStateFromDBOrGenesisFile()` and `LoadStateFromDBOrGenesisDoc()` no longer saves the state in the database if not found, the genesis state is simply returned (@erikgrinaker)
|
||||
- [crypto] [\#5236](https://github.com/tendermint/tendermint/issues/5236) `VerifyBytes` is now `VerifySignature` on the `crypto.PubKey` interface (@marbar3778)
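As a rough illustration of the rename (not an official snippet from this release), signature verification on the `crypto.PubKey` interface now goes through `VerifySignature`:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	priv := ed25519.GenPrivKey()
	msg := []byte("example message")

	sig, err := priv.Sign(msg)
	if err != nil {
		panic(err)
	}

	// Previously VerifyBytes; now VerifySignature on crypto.PubKey.
	ok := priv.PubKey().VerifySignature(msg, sig)
	fmt.Println("signature valid:", ok)
}
```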
### FEATURES:
|
||||
|
||||
- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Add amnesia evidence and remove mock and potential amnesia evidence from abci (@cmwaters)
|
||||
- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker) (see the sketch after this list)
|
||||
- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
|
||||
- [genesis] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `initial_height` field to specify the initial chain height (defaults to `1`) (@erikgrinaker)
|
||||
- [db] [\#5233](https://github.com/tendermint/tendermint/issues/5233) Add support for `badgerdb` database backend (@erikgrinaker)
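A minimal, hypothetical ABCI application sketch showing how the new `InitChain.InitialHeight` and `ResponseInitChain.app_hash` fields might be used; the `minimalApp` type exists only for illustration:

```go
package main

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// minimalApp is a hypothetical application used only to illustrate the new fields.
type minimalApp struct {
	abci.BaseApplication

	initialHeight int64
	appHash       []byte
}

func (app *minimalApp) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
	// RequestInitChain.InitialHeight carries the configured initial block height.
	app.initialHeight = req.InitialHeight

	// ResponseInitChain.AppHash is recorded in the genesis block.
	return abci.ResponseInitChain{AppHash: app.appHash}
}

func main() {
	_ = abci.Application(&minimalApp{appHash: []byte("initial-app-hash")})
}
```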
### IMPROVEMENTS:
|
||||
|
||||
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
- [evidence] [\#5170](https://github.com/tendermint/tendermint/pull/5170) change abci evidence time to the time the infraction happened not the time the evidence was committed on the block (@cmwaters)
|
||||
- [node] [\#5211](https://github.com/tendermint/tendermint/issues/5211) Don't attempt fast sync when the ABCI application specifies ourself as the only validator via `InitChain` (@erikgrinaker)
|
||||
- [libs/rand] [\#5215](https://github.com/tendermint/tendermint/pull/5215) Fix out-of-memory error on unexpected argument of Str() (@SadPencil)
|
||||
|
||||
|
||||
## v0.34.0-rc2
|
||||
|
||||
*July 30, 2020*
|
||||
Holy smokes, this is a big one! For a more reader-friendly overview of the changes in 0.34.0
|
||||
(and of the changes you need to accommodate as a user), check out [UPGRADING.md](UPGRADING.md).
|
||||
|
||||
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
|
||||
@joe-bowman, @cuonglm
|
||||
@joe-bowman, @cuonglm, @SadPencil and @dongsam.
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
### BREAKING CHANGES:
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
|
||||
- [evidence] [\#4959](https://github.com/tendermint/tendermint/issues/4959) Add json tags to `DuplicateVoteEvidence`
|
||||
- [light] [\#4946](https://github.com/tendermint/tendermint/issues/4946) `tendermint lite` cmd has been renamed to `tendermint light`
|
||||
- [privval] [\#4582](https://github.com/tendermint/tendermint/issues/4582) `round` in private_validator_state.json is no longer a string in json it is now a number
|
||||
- [config] [\#5315](https://github.com/tendermint/tendermint/pull/5315) Rename `prof_laddr` to `pprof_laddr` and move it to `rpc` section (@melekes)
|
||||
- [evidence] [\#4959](https://github.com/tendermint/tendermint/pull/4959) Add JSON tags to `DuplicateVoteEvidence` (@marbar3778)
|
||||
- [light] [\#4946](https://github.com/tendermint/tendermint/pull/4946) `tendermint lite` command has been renamed to `tendermint light` (@marbar3778)
|
||||
- [privval] [\#4582](https://github.com/tendermint/tendermint/pull/4582) `round` in private_validator_state.json is no longer JSON string; instead it is a number (@marbar3778)
|
||||
- [rpc] [\#4792](https://github.com/tendermint/tendermint/pull/4792) `/validators` are now sorted by voting power (@melekes)
|
||||
- [rpc] [\#4937](https://github.com/tendermint/tendermint/issues/4937) Return an error when `page` pagination param is 0 in `/validators`, `tx_search` (@melekes)
|
||||
- [rpc] [\#5137](https://github.com/tendermint/tendermint/issues/5137) The json tags of `gasWanted` & `gasUsed` in `ResponseCheckTx` & `ResponseDeliverTx` have been made snake_case. (`gas_wanted` & `gas_used`)
|
||||
- [rpc] [\#4947](https://github.com/tendermint/tendermint/pull/4947) Return an error when `page` pagination param is 0 in `/validators`, `tx_search` (@melekes)
|
||||
- [rpc] [\#5137](https://github.com/tendermint/tendermint/pull/5137) JSON tags of `gasWanted` and `gasUsed` in `ResponseCheckTx` and `ResponseDeliverTx` have been made snake_case (`gas_wanted` and `gas_used`) (@marbar3778)
|
||||
- [rpc] [\#5315](https://github.com/tendermint/tendermint/pull/5315) Remove `/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and `/unsafe_write_heap_profile`. Please use pprof functionality instead (@melekes)
|
||||
- [rpc/client, rpc/jsonrpc/client] [\#5347](https://github.com/tendermint/tendermint/pull/5347) All client methods now accept `context.Context` as 1st param (@melekes) (see the sketch at the end of this Go API list)
|
||||
|
||||
- Apps
|
||||
|
||||
- [abci] [\#4704](https://github.com/tendermint/tendermint/pull/4704) Add ABCI methods `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk` for state sync snapshots. `ABCIVersion` bumped to 0.17.0.
|
||||
- [abci] [\#4989](https://github.com/tendermint/tendermint/issues/4989) `Proof` within `ResponseQuery` has been renamed to `ProofOps`
|
||||
- [abci] `CheckTxType` Protobuf enum names are now uppercase, to follow Protobuf style guide
|
||||
- [abci] [\#4704](https://github.com/tendermint/tendermint/pull/4704) Add ABCI methods `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk` for state sync snapshots. `ABCIVersion` bumped to 0.17.0. (@erikgrinaker)
|
||||
- [abci] [\#4989](https://github.com/tendermint/tendermint/pull/4989) `Proof` within `ResponseQuery` has been renamed to `ProofOps` (@marbar3778)
|
||||
- [abci] [\#5096](https://github.com/tendermint/tendermint/pull/5096) `CheckTxType` Protobuf enum names are now uppercase, to follow Protobuf style guide (@erikgrinaker)
|
||||
- [abci] [\#5324](https://github.com/tendermint/tendermint/pull/5324) ABCI evidence type is now an enum with two types of possible evidence (@cmwaters)
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/issues/4637) Migrate blockchain reactor(s) to Protobuf encoding
|
||||
- [evidence] [\#4949](https://github.com/tendermint/tendermint/issues/4949) Migrate evidence reactor to Protobuf encoding
|
||||
- [mempool] [\#4940](https://github.com/tendermint/tendermint/issues/4940) Migrate mempool to Protobuf encoding
|
||||
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/issues/4973) Migrate `p2p/pex` reactor to Protobuf encoding
|
||||
- [statesync] [\#4943](https://github.com/tendermint/tendermint/issues/4943) Migrate state sync reactor to Protobuf encoding
|
||||
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
|
||||
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
|
||||
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool to Protobuf encoding (@marbar3778)
|
||||
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
|
||||
- New `MaxBatchBytes` config setting defines the max size of one batch.
|
||||
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
|
||||
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
- [evidence] [\#4780](https://github.com/tendermint/tendermint/pull/4780) Cap evidence to an absolute number (@cmwaters)
|
||||
- Add `max_num` to consensus evidence parameters (default: 50 items).
|
||||
- [evidence] [\#4725](https://github.com/tendermint/tendermint/issues/4725) Remove `Pubkey` from `DuplicateVoteEvidence`
|
||||
- [state] [\#4845](https://github.com/tendermint/tendermint/issues/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
|
||||
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
|
||||
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
|
||||
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
|
||||
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
|
||||
- [types] [\#4792](https://github.com/tendermint/tendermint/pull/4792) Sort validators by voting power to enable faster commit verification (@melekes)
|
||||
|
||||
- On-disk serialization
|
||||
|
||||
- [state] [\#4679](https://github.com/tendermint/tendermint/issues/4679) Migrate state module to Protobuf encoding
|
||||
- [state] [\#4679](https://github.com/tendermint/tendermint/pull/4679) Migrate state module to Protobuf encoding (@marbar3778)
|
||||
- `BlockStoreStateJSON` is now `BlockStoreState` and is encoded as binary in the database
|
||||
- [store] [\#4778](https://github.com/tendermint/tendermint/issues/4778) Migrate store module to Protobuf encoding
|
||||
- [store] [\#4778](https://github.com/tendermint/tendermint/pull/4778) Migrate store module to Protobuf encoding (@marbar3778)
|
||||
|
||||
- Light client, private validator
|
||||
|
||||
- [light] [\#4964](https://github.com/tendermint/tendermint/issues/4964) Migrate light module to Protobuf encoding
|
||||
- [privval] [\#4985](https://github.com/tendermint/tendermint/issues/4985) Migrate `privval` module to Protobuf encoding
|
||||
- [light] [\#4964](https://github.com/tendermint/tendermint/pull/4964) Migrate light module to Protobuf encoding (@marbar3778)
|
||||
- [privval] [\#4985](https://github.com/tendermint/tendermint/pull/4985) Migrate `privval` module to Protobuf encoding (@marbar3778)
|
||||
|
||||
- Go API
|
||||
|
||||
- [light] [\#4946](https://github.com/tendermint/tendermint/issues/4946) Rename `lite2` pkg to `light`. Remove `lite` implementation.
|
||||
- [consensus] [\#4582](https://github.com/tendermint/tendermint/pull/4582) RoundState: `Round`, `LockedRound` & `CommitRound` are now `int32` (@marbar3778)
|
||||
- [consensus] [\#4582](https://github.com/tendermint/tendermint/pull/4582) HeightVoteSet: `round` is now `int32` (@marbar3778)
|
||||
- [crypto] [\#4721](https://github.com/tendermint/tendermint/pull/4721) Remove `SimpleHashFromMap()` and `SimpleProofsFromMap()` (@erikgrinaker)
|
||||
- [crypto] [\#4940](https://github.com/tendermint/tendermint/issues/4940) All keys have become `[]byte` instead of `[<size>]byte`. The byte method no longer returns the marshaled value but just the `[]byte` form of the data.
|
||||
- [crypto] \#4988 Removal of key type multisig
|
||||
- [crypto] [\#4940](https://github.com/tendermint/tendermint/pull/4940) All keys have become `[]byte` instead of `[<size>]byte`. The byte method no longer returns the marshaled value but just the `[]byte` form of the data. (@marbar3778)
|
||||
- [crypto] [\#4988](https://github.com/tendermint/tendermint/pull/4988) Removal of key type multisig (@marbar3778)
|
||||
- The key has been moved to the [Cosmos-SDK](https://github.com/cosmos/cosmos-sdk/blob/master/crypto/types/multisig/multisignature.go)
|
||||
- [crypto] [\#4989](https://github.com/tendermint/tendermint/issues/4989) Remove `Simple` prefixes from `SimpleProof`, `SimpleValueOp` & `SimpleProofNode`.
|
||||
- [crypto] [\#4989](https://github.com/tendermint/tendermint/pull/4989) Remove `Simple` prefixes from `SimpleProof`, `SimpleValueOp` & `SimpleProofNode`. (@marbar3778)
|
||||
- `merkle.Proof` has been renamed to `ProofOps`.
|
||||
- Protobuf messages `Proof` & `ProofOp` has been moved to `proto/crypto/merkle`
|
||||
- `SimpleHashFromByteSlices` has been renamed to `HashFromByteSlices`
|
||||
- `SimpleHashFromByteSlicesIterative` has been renamed to `HashFromByteSlicesIterative`
|
||||
- `SimpleProofsFromByteSlices` has been renamed to `ProofsFromByteSlices`
|
||||
- [crypto] [\#4941](https://github.com/tendermint/tendermint/issues/4941) Remove suffixes from all keys.
|
||||
- [crypto] [\#4941](https://github.com/tendermint/tendermint/pull/4941) Remove suffixes from all keys. (@marbar3778)
|
||||
- ed25519: type `PrivKeyEd25519` is now `PrivKey`
|
||||
- ed25519: type `PubKeyEd25519` is now `PubKey`
|
||||
- secp256k1: type `PrivKeySecp256k1` is now `PrivKey`
|
||||
- secp256k1: type `PubKeySecp256k1` is now `PubKey`
|
||||
- sr25519: type `PrivKeySr25519` is now `PrivKey`
|
||||
- sr25519: type `PubKeySr25519` is now `PubKey`
|
||||
- multisig: type `PubKeyMultisigThreshold` is now `PubKey`
|
||||
- [libs] [\#4831](https://github.com/tendermint/tendermint/issues/4831) Remove `Bech32` pkg from Tendermint. This pkg now lives in the [cosmos-sdk](https://github.com/cosmos/cosmos-sdk/tree/4173ea5ebad906dd9b45325bed69b9c655504867/types/bech32)
|
||||
- [rpc/client] [\#4947](https://github.com/tendermint/tendermint/issues/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes)
|
||||
- `UnconfirmedTxs` `limit` param is a pointer
|
||||
- [proto] [\#5025](https://github.com/tendermint/tendermint/issues/5025) All proto files have been moved to `/proto` directory.
|
||||
- [crypto] [\#5214](https://github.com/tendermint/tendermint/pull/5214) Change `GenPrivKeySecp256k1` to `GenPrivKeyFromSecret` to be consistent with other keys (@marbar3778)
|
||||
- [crypto] [\#5236](https://github.com/tendermint/tendermint/pull/5236) `VerifyBytes` is now `VerifySignature` on the `crypto.PubKey` interface (@marbar3778)
|
||||
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and change evidence interface (@cmwaters)
|
||||
- [libs] [\#4831](https://github.com/tendermint/tendermint/pull/4831) Remove `Bech32` pkg from Tendermint. This pkg now lives in the [cosmos-sdk](https://github.com/cosmos/cosmos-sdk/tree/4173ea5ebad906dd9b45325bed69b9c655504867/types/bech32) (@marbar3778)
|
||||
- [light] [\#4946](https://github.com/tendermint/tendermint/pull/4946) Rename `lite2` pkg to `light`. Remove `lite` implementation. (@marbar3778)
|
||||
- [light] [\#5347](https://github.com/tendermint/tendermint/pull/5347) `NewClient`, `NewHTTPClient`, `VerifyHeader` and `VerifyLightBlockAtHeight` now accept `context.Context` as 1st param (@melekes)
|
||||
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, following RFC6962 (@erikgrinaker)
|
||||
- [proto] [\#5025](https://github.com/tendermint/tendermint/pull/5025) All proto files have been moved to `/proto` directory. (@marbar3778)
|
||||
- Using the recommended file layout from buf, [see here for more info](https://buf.build/docs/lint-checkers#file_layout)
|
||||
- [state] [\#4679](https://github.com/tendermint/tendermint/issues/4679) `TxResult` is a Protobuf type defined in `abci` types directory
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) `SignedMsgType` has moved to a Protobuf enum types
|
||||
- [types] [\#4962](https://github.com/tendermint/tendermint/issues/4962) `ConsensusParams`, `BlockParams`, `EvidenceParams`, `ValidatorParams` & `HashedParams` are now Protobuf types
|
||||
- [types] [\#4852](https://github.com/tendermint/tendermint/issues/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes`
|
||||
- [types] [\#4798](https://github.com/tendermint/tendermint/issues/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
|
||||
- [types] [\#4845](https://github.com/tendermint/tendermint/issues/4845) Remove `ABCIResult`
|
||||
- [types] [\#5029](https://github.com/tendermint/tendermint/issues/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32`
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) Vote: `ValidatorIndex` & `Round` are now `int32`
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) Proposal: `POLRound` & `Round` are now `int32`
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/issues/4939) Block: `Round` is now `int32`
|
||||
- [consensus] [\#4582](https://github.com/tendermint/tendermint/issues/4582) RoundState: `Round`, `LockedRound` & `CommitRound` are now `int32`
|
||||
- [consensus] [\#4582](https://github.com/tendermint/tendermint/issues/4582) HeightVoteSet: `round` is now `int32`
|
||||
- [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/issues/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes)
|
||||
- [rpc/client] [\#4947](https://github.com/tendermint/tendermint/pull/4947) `Validators`, `TxSearch` `page`/`per_page` params become pointers (@melekes)
|
||||
- `UnconfirmedTxs` `limit` param is a pointer
|
||||
- [rpc/jsonrpc/server] [\#5141](https://github.com/tendermint/tendermint/pull/5141) Remove `WriteRPCResponseArrayHTTP` (use `WriteRPCResponseHTTP` instead) (@melekes)
|
||||
- [state] [\#4679](https://github.com/tendermint/tendermint/pull/4679) `TxResult` is a Protobuf type defined in `abci` types directory (@marbar3778)
|
||||
- [state] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `State.InitialHeight` field to record initial block height, must be `1` (not `0`) to start from 1 (@erikgrinaker)
|
||||
- [state] [\#5231](https://github.com/tendermint/tendermint/pull/5231) `LoadStateFromDBOrGenesisFile()` and `LoadStateFromDBOrGenesisDoc()` no longer saves the state in the database if not found, the genesis state is simply returned (@erikgrinaker)
|
||||
- [state] [\#5348](https://github.com/tendermint/tendermint/pull/5348) Define an Interface for the state store. (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `SignedMsgType` has moved to a Protobuf enum types (@marbar3778)
|
||||
- [types] [\#4962](https://github.com/tendermint/tendermint/pull/4962) `ConsensusParams`, `BlockParams`, `EvidenceParams`, `ValidatorParams` & `HashedParams` are now Protobuf types (@marbar3778)
|
||||
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
|
||||
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
|
||||
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
|
||||
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
|
||||
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Block: `Round` is now `int32` (@marbar3778)
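To illustrate the `context.Context` change called out above for `rpc/client` (a sketch only, assuming the v0.34 `rpc/client/http` constructor), every client method now takes a context as its first argument:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Status, like all other client methods, now takes a context first.
	status, err := c.Status(ctx)
	if err != nil {
		panic(err)
	}
	fmt.Println("latest height:", status.SyncInfo.LatestBlockHeight)
}
```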
### FEATURES:
|
||||
### FEATURES
|
||||
|
||||
- [abci] [\#5031](https://github.com/tendermint/tendermint/issues/5031) Add `AppVersion` to consensus parameters (@james-ray)
|
||||
- ... making it possible to update your ABCI application version via `EndBlock` response
|
||||
- [abci] [\#5031](https://github.com/tendermint/tendermint/pull/5031) Add `AppVersion` to consensus parameters (@james-ray)
|
||||
- This makes it possible to update your ABCI application version via `EndBlock` response
|
||||
- [abci] [\#5174](https://github.com/tendermint/tendermint/pull/5174) Remove `MockEvidence` in favor of testing with actual evidence types (`DuplicateVoteEvidence` & `LightClientAttackEvidence`) (@cmwaters)
|
||||
- [abci] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `InitChain.InitialHeight` field giving the initial block height (@erikgrinaker)
|
||||
- [abci] [\#5227](https://github.com/tendermint/tendermint/pull/5227) Add `ResponseInitChain.app_hash` which is recorded in genesis block (@erikgrinaker)
|
||||
- [config] [\#5147](https://github.com/tendermint/tendermint/pull/5147) Add `--consensus.double_sign_check_height` flag and `DoubleSignCheckHeight` config variable. See [ADR-51](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-051-double-signing-risk-reduction.md) (@dongsam)
|
||||
- [db] [\#5233](https://github.com/tendermint/tendermint/pull/5233) Add support for `badgerdb` database backend (@erikgrinaker)
|
||||
- [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes)
|
||||
- [evidence] [\#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia evidence can be detected, verified and committed (@cmwaters)
|
||||
- [light] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Submit conflicting headers, if any, to a full node & all witnesses (@melekes)
|
||||
- [p2p] [\#4981](https://github.com/tendermint/tendermint/issues/4981) Expose `SaveAs` func on NodeKey (@melekes)
|
||||
- [evidence] [\#4821](https://github.com/tendermint/tendermint/pull/4821) Amnesia (light client attack) evidence can be detected, verified and committed (@cmwaters)
|
||||
- [genesis] [\#5191](https://github.com/tendermint/tendermint/pull/5191) Add `initial_height` field to specify the initial chain height (defaults to `1`) (@erikgrinaker)
|
||||
- [libs/math] [\#5665](https://github.com/tendermint/tendermint/pull/5665) Make fractions unsigned integers (uint64) (@cmwaters)
|
||||
- [light] [\#5298](https://github.com/tendermint/tendermint/pull/5298) Morph validator set and signed header into light block (@cmwaters)
|
||||
- [p2p] [\#4981](https://github.com/tendermint/tendermint/pull/4981) Expose `SaveAs` func on NodeKey (@melekes)
|
||||
- [privval] [\#5239](https://github.com/tendermint/tendermint/pull/5239) Add `chainID` to requests from client. (@marbar3778)
|
||||
- [rpc] [\#4923](https://github.com/tendermint/tendermint/pull/4923) Support `BlockByHash` query (@fedekunze)
|
||||
- [rpc] [\#4979](https://github.com/tendermint/tendermint/issues/4979) Support EXISTS operator in `/tx_search` query (@melekes)
|
||||
- [rpc] [\#5017](https://github.com/tendermint/tendermint/issues/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes)
|
||||
- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
|
||||
- [rpc] [\#4979](https://github.com/tendermint/tendermint/pull/4979) Support EXISTS operator in `/tx_search` query (@melekes)
|
||||
- [rpc] [\#5017](https://github.com/tendermint/tendermint/pull/5017) Add `/check_tx` endpoint to check transactions without executing them or adding them to the mempool (@melekes) (see the example after this list)
|
||||
- [rpc] [\#5108](https://github.com/tendermint/tendermint/pull/5108) Subscribe using the websocket for new evidence events (@cmwaters)
|
||||
- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section.
|
||||
- [evidence] [\#5361](https://github.com/tendermint/tendermint/pull/5361) Add LightClientAttackEvidence and refactor evidence lifecycle - for more information see [ADR-059](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-059-evidence-composition-and-lifecycle.md) (@cmwaters)
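A hedged example of the new `/check_tx` endpoint through the Go RPC client, assuming the client exposes a corresponding `CheckTx` method; the transaction is checked by the application but neither executed nor added to the mempool:

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	c, err := rpchttp.New("tcp://127.0.0.1:26657", "/websocket")
	if err != nil {
		panic(err)
	}

	tx := types.Tx("example-key=example-value")

	// CheckTx hits /check_tx: the app validates the transaction,
	// but it is not executed and not added to the mempool.
	res, err := c.CheckTx(context.Background(), tx)
	if err != nil {
		panic(err)
	}
	fmt.Println("code:", res.Code, "log:", res.Log)
}
```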
### IMPROVEMENTS:
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [consensus] [\#4578](https://github.com/tendermint/tendermint/issues/4578) Attempt to repair the consensus WAL file (`data/cs.wal/wal`) automatically in case of corruption (@alessio)
|
||||
- [blockchain] [\#5278](https://github.com/tendermint/tendermint/pull/5278) Verify only +2/3 of the signatures in a block when fast syncing. (@marbar3778)
|
||||
- [consensus] [\#4578](https://github.com/tendermint/tendermint/pull/4578) Attempt to repair the consensus WAL file (`data/cs.wal/wal`) automatically in case of corruption (@alessio)
|
||||
- The original WAL file will be backed up to `data/cs.wal/wal.CORRUPTED`.
|
||||
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Improved evidence db (@cmwaters)
|
||||
- [consensus] [\#5143](https://github.com/tendermint/tendermint/pull/5143) Only call `privValidator.GetPubKey` once per block (@melekes)
|
||||
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
|
||||
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
|
||||
- [evidence] [\#4892](https://github.com/tendermint/tendermint/pull/4892) Remove redundant header from phantom validator evidence (@cmwaters)
|
||||
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
|
||||
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
|
||||
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
|
||||
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) compare header w/ witnesses only when doing bisection (@melekes)
|
||||
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) validate basic for inbound validator sets and headers before further processing them (@cmwaters)
|
||||
- [p2p/conn] [\#4795](https://github.com/tendermint/tendermint/issues/4795) Return err on `signChallenge()` instead of panic
|
||||
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
|
||||
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
|
||||
- [mempool] Add RemoveTxByKey() exported function for custom mempool cleaning (@p4u)
|
||||
- [p2p/conn] [\#4795](https://github.com/tendermint/tendermint/pull/4795) Return err on `signChallenge()` instead of panic
|
||||
- [privval] [\#5437](https://github.com/tendermint/tendermint/pull/5437) `NewSignerDialerEndpoint` can now be given `SignerServiceEndpointOption` (@erikgrinaker)
|
||||
- [rpc] [\#4968](https://github.com/tendermint/tendermint/pull/4968) JSON encoding is now handled by `libs/json`, not Amino (@erikgrinaker)
|
||||
- [rpc] [\#5293](https://github.com/tendermint/tendermint/pull/5293) `/dial_peers` has added `private` and `unconditional` as parameters. (@marbar3778)
|
||||
- [state] [\#4781](https://github.com/tendermint/tendermint/pull/4781) Export `InitStateVersion` for the initial state version (@erikgrinaker)
|
||||
- [txindex] [\#4466](https://github.com/tendermint/tendermint/pull/4466) Allow to index an event at runtime (@favadi)
|
||||
- `abci.EventAttribute` replaces `KV.Pair`
|
||||
- [libs] [\#5126](https://github.com/tendermint/tendermint/issues/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking
|
||||
- [types] [\#4905](https://github.com/tendermint/tendermint/pull/4905) Add `ValidateBasic` to validator and validator set (@cmwaters)
|
||||
- [rpc] [\#4968](https://github.com/tendermint/tendermint/issues/4968) JSON encoding is now handled by `libs/json`, not Amino
|
||||
- [mempool] Add RemoveTxByKey() exported function for custom mempool cleaning (@p4u)
|
||||
- [consensus] [\#5143](https://github.com/tendermint/tendermint/issues/5143) Only call `privValidator.GetPubKey` once per block (@melekes)
|
||||
- [types] [\#5340](https://github.com/tendermint/tendermint/pull/5340) Add check in `Header.ValidateBasic()` for block protocol version (@marbar3778)
|
||||
- [types] [\#5490](https://github.com/tendermint/tendermint/pull/5490) Use `Commit` and `CommitSig` max sizes instead of vote max size to calculate the maximum block size. (@cmwaters)
|
||||
|
||||
### BUG FIXES:
|
||||
|
||||
- [blockchain/v2] Correctly set block store base in status responses (@erikgrinaker)
|
||||
### BUG FIXES
|
||||
|
||||
- [abci/grpc] [\#5520](https://github.com/tendermint/tendermint/pull/5520) Return async responses in order, to avoid mempool panics. (@erikgrinaker)
|
||||
- [blockchain/v2] [\#4971](https://github.com/tendermint/tendermint/pull/4971) Correctly set block store base in status responses (@erikgrinaker)
|
||||
- [blockchain/v2] [\#5499](https://github.com/tendermint/tendermint/pull/5499) Fix "duplicate block enqueued by processor" panic (@melekes)
|
||||
- [blockchain/v2] [\#5530](https://github.com/tendermint/tendermint/pull/5530) Fix out of order block processing panic (@melekes)
|
||||
- [blockchain/v2] [\#5553](https://github.com/tendermint/tendermint/pull/5553) Make the removal of an already removed peer a noop (@melekes)
|
||||
- [consensus] [\#4895](https://github.com/tendermint/tendermint/pull/4895) Cache the address of the validator to reduce querying a remote KMS (@joe-bowman)
|
||||
- [consensus] [\#4970](https://github.com/tendermint/tendermint/issues/4970) Stricter on `LastCommitRound` check (@cuonglm)
|
||||
- [p2p] [\#5136](https://github.com/tendermint/tendermint/pull/5136) Fix error for peer with the same ID but different IPs (@valardragon)
|
||||
- [proxy] [\#5078](https://github.com/tendermint/tendermint/issues/5078) Fix a bug, where TM does not exit when ABCI app crashes (@melekes)
|
||||
|
||||
|
||||
## v0.34.0-rc1
|
||||
|
||||
This release was removed, as a premature GitHub tag was recorded on sum.golang.org causing checksum errors.
|
||||
- [consensus] [\#4970](https://github.com/tendermint/tendermint/pull/4970) Don't allow `LastCommitRound` to be negative (@cuonglm)
|
||||
- [consensus] [\#5329](https://github.com/tendermint/tendermint/pull/5329) Fix wrong proposer schedule for validators returned by `InitChain` (@erikgrinaker)
|
||||
- [docker] [\#5385](https://github.com/tendermint/tendermint/pull/5385) Fix incorrect `time_iota_ms` default setting causing block timestamp drift (@erikgrinaker)
|
||||
- [evidence] [\#5170](https://github.com/tendermint/tendermint/pull/5170) Change ABCI evidence time to the time the infraction happened not the time the evidence was committed on the block (@cmwaters)
|
||||
- [evidence] [\#5610](https://github.com/tendermint/tendermint/pull/5610) Make it possible for ABCI evidence to be formed from Tendermint evidence (@cmwaters)
|
||||
- [libs/rand] [\#5215](https://github.com/tendermint/tendermint/pull/5215) Fix out-of-memory error on unexpected argument of Str() (@SadPencil)
|
||||
- [light] [\#5307](https://github.com/tendermint/tendermint/pull/5307) Persist correct proposer priority in light client validator sets (@cmwaters)
|
||||
- [p2p] [\#5136](https://github.com/tendermint/tendermint/pull/5136) Fix error for peer with the same ID but different IPs (@valardragon)
|
||||
- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
|
||||
- [proxy] [\#5078](https://github.com/tendermint/tendermint/pull/5078) Force Tendermint to exit when ABCI app crashes (@melekes)
|
||||
- [rpc] [\#5660](https://github.com/tendermint/tendermint/pull/5660) Set `application/json` as the `Content-Type` header in RPC responses. (@alexanderbez)
|
||||
- [store] [\#5382](https://github.com/tendermint/tendermint/pull/5382) Fix race conditions when loading/saving/pruning blocks (@erikgrinaker)
|
||||
|
||||
## v0.33.8
|
||||
|
||||
*August 11, 2020*
|
||||
|
||||
## Go security update
|
||||
### Go security update
|
||||
|
||||
Go reported a security vulnerability affecting the `encoding/binary` package. The most recent Tendermint binary is built with Go 1.14.6; for this reason, the Tendermint engineering team has opted to cut a release to help users move to the correct version of Go. Read more about the security issue [here](https://github.com/golang/go/issues/40618).
|
||||
@@ -322,6 +337,8 @@ need to update your code.**
|
||||
|
||||
## v0.33.5
|
||||
|
||||
*May 28, 2020*
|
||||
|
||||
Special thanks to external contributors on this release: @tau3,
|
||||
|
||||
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
# Unreleased Changes
|
||||
|
||||
## v0.34.0-rc5
|
||||
## vX.X
|
||||
|
||||
Special thanks to external contributors on this release:
|
||||
|
||||
@@ -9,18 +9,53 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi
|
||||
### BREAKING CHANGES
|
||||
|
||||
- CLI/RPC/Config
|
||||
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
|
||||
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
|
||||
- [cli] \#5777 Use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
|
||||
|
||||
- Apps
|
||||
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
|
||||
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
|
||||
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
|
||||
|
||||
- P2P Protocol
|
||||
|
||||
- Go API
|
||||
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
|
||||
- [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
|
||||
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
|
||||
- [proto/p2p] Renamed `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
|
||||
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
|
||||
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
|
||||
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
|
||||
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
|
||||
|
||||
- Blockchain Protocol
|
||||
|
||||
- Data Storage
|
||||
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
|
||||
|
||||
### FEATURES
|
||||
|
||||
### IMPROVEMENTS
|
||||
|
||||
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
|
||||
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
|
||||
- [privval] \#5725 Add gRPC support to private validator.
|
||||
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
|
||||
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
|
||||
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
|
||||
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
|
||||
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
|
||||
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
|
||||
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
|
||||
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
|
||||
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)
|
||||
|
||||
### BUG FIXES
|
||||
|
||||
- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
|
||||
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
|
||||
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
|
||||
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
|
||||
CONTRIBUTING.md
@@ -106,12 +106,12 @@ specify exactly the dependency you want to update, eg.
|
||||
|
||||
We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core.
|
||||
|
||||
For linting and checking breaking changes, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
|
||||
For linting, checking breaking changes and generating proto stubs, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`.
|
||||
|
||||
There are two ways to generate your proto stubs.
|
||||
|
||||
1. Use Docker: run `make proto-gen-docker` to pull an image that will generate your proto stubs with no need to install anything.
|
||||
2. Run `make proto-gen` after installing `protoc` and gogoproto, you can do this by running `make protobuf`.
|
||||
2. Run `make proto-gen` after installing `buf` and `gogoproto`; you can do this by running `make protobuf`.
|
||||
|
||||
### Installation Instructions
|
||||
|
||||
@@ -127,18 +127,19 @@ make install
|
||||
|
||||
You should now be able to run `make proto-gen` from inside the root Tendermint directory to generate new files from proto files.
|
||||
|
||||
## Vagrant
|
||||
### Visual Studio Code
|
||||
|
||||
If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
|
||||
hacking Tendermint with the commands below.
|
||||
If you are a VS Code user, you may want to add the following to your `.vscode/settings.json`:
|
||||
|
||||
NOTE: In case you installed Vagrant in 2017, you might need to run
|
||||
`vagrant box update` to upgrade to the latest `ubuntu/xenial64`.
|
||||
|
||||
```sh
|
||||
vagrant up
|
||||
vagrant ssh
|
||||
make test
|
||||
```json
|
||||
{
|
||||
"protoc": {
|
||||
"options": [
|
||||
"--proto_path=${workspaceRoot}/proto",
|
||||
"--proto_path=${workspaceRoot}/third_party/proto"
|
||||
]
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Changelog
|
||||
@@ -246,57 +247,107 @@ Each PR should have one commit once it lands on `master`; this can be accomplish
|
||||
|
||||
#### Major Release
|
||||
|
||||
1. start on `master`
|
||||
2. run integration tests (see `test_integrations` in Makefile)
|
||||
3. prepare release in a pull request against `master` (to be squash merged):
|
||||
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
This major release process assumes that this release was preceded by release candidates.
|
||||
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
|
||||
|
||||
1. Start on the latest RC branch (`RCx/vX.X.0`).
|
||||
2. Run integration tests.
|
||||
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
|
||||
- "Squash" changes from the changelog entries for the RCs into a single entry,
|
||||
and add all changes included in `CHANGELOG_PENDING.md`.
|
||||
(Squashing includes both combining all entries, as well as removing or simplifying
|
||||
any intra-RC changes. It may also help to alphabetize the entries by package name.)
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all PRs
|
||||
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
|
||||
or other upgrading flows.
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
|
||||
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
|
||||
- `git checkout RCx/vX.X.0`
|
||||
- `git checkout -b release/vX.X.0`
|
||||
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
|
||||
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
|
||||
- `git push origin vX.X.0`
|
||||
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
|
||||
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
|
||||
new major release series.
|
||||
|
||||
##### Major Release (from `master`)
|
||||
|
||||
1. Start on `master`
|
||||
2. Run integration tests (see `test_integrations` in Makefile)
|
||||
3. Prepare release in a pull request against `master` (to be squash merged):
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
|
||||
had release candidates, squash all the RC updates into one
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
|
||||
all issues
|
||||
- run `bash ./scripts/authors.sh` to get a list of authors since the latest
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
|
||||
release, and add the github aliases of external contributors to the top of
|
||||
the changelog. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- reset the `CHANGELOG_PENDING.md`
|
||||
- bump Tendermint version in `version.go`
|
||||
- bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- bump ABCI protocol version in `version.go`, if necessary
|
||||
- make sure all significant breaking changes are covered in `UPGRADING.md`
|
||||
4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`)
|
||||
5. merge back to master (don't squash merge!)
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- Bump ABCI protocol version in `version.go`, if necessary
|
||||
- Make sure all significant breaking changes are covered in `UPGRADING.md`
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
5. Update the `CHANGELOG.md` file on master with the release's changelog.
|
||||
6. Delete any RC branches and tags for this release (if applicable)
|
||||
|
||||
#### Minor Release
|
||||
#### Minor Release (Point Releases)
|
||||
|
||||
Minor releases are done differently from major releases: They are built off of long-lived release candidate branches, rather than from master.
|
||||
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
|
||||
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
|
||||
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
|
||||
|
||||
1. Checkout the long-lived release candidate branch: `git checkout rcX/vX.X.X`
|
||||
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
|
||||
|
||||
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
|
||||
|
||||
To create a minor release:
|
||||
|
||||
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
|
||||
2. Run integration tests: `make test_integrations`
|
||||
3. Prepare the release:
|
||||
- copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||
- run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- reset the `CHANGELOG_PENDING.md`
|
||||
- bump Tendermint version in `version.go`
|
||||
- bump P2P and block protocol versions in `version.go`, if necessary
|
||||
- bump ABCI protocol version in `version.go`, if necessary
|
||||
- make sure all significant breaking changes are covered in `UPGRADING.md`
|
||||
4. Create a release branch `release/vX.X.x` off the release candidate branch:
|
||||
- `git checkout -b release/vX.X.x`
|
||||
- `git push -u origin release/vX.X.x`
|
||||
- Note that all branches prefixed with `release` are protected once pushed. You will need admin help to make any changes to the branch.
|
||||
5. Open a pull request of the new minor release branch onto the latest major release branch `vX.X` and then rebase to merge. This will start the release process.
|
||||
3. Check out a new branch and prepare the release:
|
||||
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
|
||||
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
|
||||
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
|
||||
- Reset the `CHANGELOG_PENDING.md`
|
||||
- Bump the ABCI version number, if necessary.
|
||||
(Note that ABCI follows semver, and that ABCI versions are the only versions
|
||||
which can change during minor releases, and only field additions are valid minor changes.)
|
||||
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
|
||||
4. Open a PR with these changes that will land them back on `vX.X.x`
|
||||
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
|
||||
- `git tag -a vX.X.x -m 'Release vX.X.x'`
|
||||
- `git push origin vX.X.x`
|
||||
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
|
||||
- Remove all `R:minor` labels from the pull requests that were included in the release.
|
||||
- Do not merge the release branch into master.
|
||||
7. Delete the former long lived release candidate branch once the release has been made.
|
||||
8. Create a new release candidate branch to be used for the next release.
|
||||
- Do not merge the backport branch into master.
|
||||
|
||||
#### Backport Release
|
||||
#### Release Candidates
|
||||
|
||||
1. start from the existing release branch you want to backport changes to (e.g. v0.30)
|
||||
Branch to a release/vX.X.X branch locally (e.g. release/v0.30.7)
|
||||
2. cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
|
||||
3. steps 2 and 3 from [Major Release](#major-release)
|
||||
4. push changes to release/vX.X.X branch
|
||||
5. open a PR against the existing vX.X branch
|
||||
Before creating an official release, especially a major release, we may want to create a
|
||||
release candidate (RC) for our friends and partners to test out. We use git tags to
|
||||
create RCs, and we build them off of RC branches. RC branches typically have names formatted
|
||||
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
|
||||
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
|
||||
|
||||
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
|
||||
have distinct names from the tags/release names.)
|
||||
|
||||
1. Start from the RC branch (e.g. `RC0/v0.34.0`).
|
||||
2. Create the new tag, specifying a name and a tag "message":
|
||||
`git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
|
||||
3. Push the tag back up to origin:
|
||||
`git push origin v0.34.0-rc0`
|
||||
Now the tag should be available on the repo's releases page.
|
||||
4. Create a new release candidate branch for any possible updates to the RC:
|
||||
`git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
|
||||
|
||||
## Testing
|
||||
|
||||
|
||||
@@ -1,4 +1,14 @@
|
||||
FROM alpine:3.9
|
||||
# stage 1 Generate Tendermint Binary
|
||||
FROM golang:1.15-alpine as builder
|
||||
RUN apk update && \
|
||||
apk upgrade && \
|
||||
apk --no-cache add make
|
||||
COPY / /tendermint
|
||||
WORKDIR /tendermint
|
||||
RUN make build-linux
|
||||
|
||||
# stage 2
|
||||
FROM golang:1.15-alpine
|
||||
LABEL maintainer="hello@tendermint.com"
|
||||
|
||||
# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
|
||||
@@ -29,18 +39,17 @@ EXPOSE 26656 26657 26660
|
||||
|
||||
STOPSIGNAL SIGTERM
|
||||
|
||||
ARG BINARY=tendermint
|
||||
COPY $BINARY /usr/bin/tendermint
|
||||
COPY --from=builder /tendermint/build/tendermint /usr/bin/tendermint
|
||||
|
||||
# You can overwrite these before the first run to influence
|
||||
# config.json and genesis.json. Additionally, you can override
|
||||
# CMD to add parameters to `tendermint node`.
|
||||
ENV PROXY_APP=kvstore MONIKER=dockernode CHAIN_ID=dockerchain
|
||||
|
||||
COPY ./docker-entrypoint.sh /usr/local/bin/
|
||||
COPY ./DOCKER/docker-entrypoint.sh /usr/local/bin/
|
||||
|
||||
ENTRYPOINT ["docker-entrypoint.sh"]
|
||||
CMD ["node"]
|
||||
CMD ["start"]
|
||||
|
||||
# Expose the data directory as a volume since there's mutable state in there
|
||||
VOLUME [ "$TMHOME" ]
|
||||
|
||||
@@ -32,7 +32,7 @@ A quick example of a built-in app and Tendermint core in one container.
|
||||
|
||||
```sh
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore
|
||||
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy-app=kvstore
|
||||
```
|
||||
|
||||
## Local cluster
|
||||
|
||||
@@ -6,11 +6,11 @@ if [ ! -d "$TMHOME/config" ]; then
|
||||
tendermint init
|
||||
|
||||
sed -i \
|
||||
-e "s/^proxy_app\s*=.*/proxy_app = \"$PROXY_APP\"/" \
|
||||
-e "s/^proxy-app\s*=.*/proxy-app = \"$PROXY_APP\"/" \
|
||||
-e "s/^moniker\s*=.*/moniker = \"$MONIKER\"/" \
|
||||
-e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \
|
||||
-e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \
|
||||
-e 's/^index_all_tags\s*=.*/index_all_tags = true/' \
|
||||
-e 's/^addr-book-strict\s*=.*/addr-book-strict = false/' \
|
||||
-e 's/^timeout-commit\s*=.*/timeout-commit = "500ms"/' \
|
||||
-e 's/^index-all-tags\s*=.*/index-all-tags = true/' \
|
||||
-e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \
|
||||
-e 's/^prometheus\s*=.*/prometheus = true/' \
|
||||
"$TMHOME/config/config.toml"
|
||||
|
||||
Makefile
@@ -1,8 +1,18 @@
|
||||
#!/usr/bin/make -f
|
||||
|
||||
PACKAGES=$(shell go list ./...)
|
||||
OUTPUT?=build/tendermint
|
||||
BUILDDIR ?= $(CURDIR)/build
|
||||
|
||||
BUILD_TAGS?=tendermint
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`
|
||||
|
||||
# If building a release, please checkout the version tag to get the correct version setting
|
||||
ifneq ($(shell git symbolic-ref -q --short HEAD),)
|
||||
VERSION := unreleased-$(shell git symbolic-ref -q --short HEAD)-$(shell git rev-parse HEAD)
|
||||
else
|
||||
VERSION := $(shell git describe)
|
||||
endif
|
||||
|
||||
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
|
||||
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
|
||||
HTTPS_GIT := https://github.com/tendermint/tendermint.git
|
||||
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
|
||||
@@ -49,21 +59,24 @@ all: check build test install
|
||||
.PHONY: all
|
||||
|
||||
# The below include contains the tools.
|
||||
include tools.mk
|
||||
include tests.mk
|
||||
include tools/Makefile
|
||||
include test/Makefile
|
||||
|
||||
###############################################################################
|
||||
### Build Tendermint ###
|
||||
### Build Tendermint ###
|
||||
###############################################################################
|
||||
|
||||
build:
|
||||
CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(OUTPUT) ./cmd/tendermint/
|
||||
build: $(BUILDDIR)/
|
||||
CGO_ENABLED=$(CGO_ENABLED) go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o $(BUILDDIR)/ ./cmd/tendermint/
|
||||
.PHONY: build
|
||||
|
||||
install:
|
||||
CGO_ENABLED=$(CGO_ENABLED) go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint
|
||||
.PHONY: install
|
||||
|
||||
$(BUILDDIR)/:
|
||||
mkdir -p $@
|
||||
|
||||
###############################################################################
|
||||
### Protobuf ###
|
||||
###############################################################################
|
||||
@@ -81,6 +94,7 @@ proto-gen:
|
||||
.PHONY: proto-gen
|
||||
|
||||
proto-gen-docker:
|
||||
@docker pull -q tendermintdev/docker-build-proto
|
||||
@echo "Generating Protobuf files"
|
||||
@docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh
|
||||
.PHONY: proto-gen-docker
|
||||
@@ -114,6 +128,27 @@ install_abci:
|
||||
@go install -mod=readonly ./abci/cmd/...
|
||||
.PHONY: install_abci
|
||||
|
||||
###############################################################################
|
||||
### Privval Server ###
|
||||
###############################################################################
|
||||
|
||||
build_privval_server:
|
||||
@go build -mod=readonly -o $(BUILDDIR)/ -i ./cmd/priv_val_server/...
|
||||
.PHONY: build_privval_server
|
||||
|
||||
generate_test_cert:
|
||||
# generate self-signing certificate authority
@certstrap init --common-name "root CA" --expires "20 years"
# generate server certificate
@certstrap request-cert -cn server -ip 127.0.0.1
# self-sign server certificate with rootCA
@certstrap sign server --CA "root CA"
# generate client certificate
@certstrap request-cert -cn client -ip 127.0.0.1
# self-sign client certificate with rootCA
@certstrap sign client --CA "root CA"
|
||||
.PHONY: generate_test_cert
|
||||
|
||||
###############################################################################
|
||||
### Distribution ###
|
||||
###############################################################################
|
||||
@@ -142,7 +177,7 @@ draw_deps:
|
||||
|
||||
get_deps_bin_size:
|
||||
@# Copy of build recipe with additional flags to perform binary size analysis
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
|
||||
$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(BUILDDIR)/ ./cmd/tendermint/ 2>&1))
|
||||
@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
|
||||
@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"
|
||||
.PHONY: get_deps_bin_size
|
||||
@@ -186,31 +221,23 @@ DESTINATION = ./index.html.md
|
||||
###############################################################################
|
||||
### Documentation ###
|
||||
###############################################################################
|
||||
|
||||
# todo remove once tendermint.com DNS is solved
|
||||
build-docs:
|
||||
cd docs && \
|
||||
while read p; do \
|
||||
(git checkout $${p} . && npm install && VUEPRESS_BASE="/$${p}/" npm run build) ; \
|
||||
mkdir -p ~/output/$${p} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${p}/ ; \
|
||||
cp ~/output/$${p}/index.html ~/output ; \
|
||||
@cd docs && \
|
||||
while read -r branch path_prefix; do \
|
||||
(git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \
|
||||
mkdir -p ~/output/$${path_prefix} ; \
|
||||
cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \
|
||||
cp ~/output/$${path_prefix}/index.html ~/output ; \
|
||||
done < versions ;
|
||||
.PHONY: build-docs
|
||||
|
||||
sync-docs:
|
||||
cd ~/output && \
|
||||
echo "role_arn = ${DEPLOYMENT_ROLE_ARN}" >> /root/.aws/config ; \
|
||||
echo "CI job = ${CIRCLE_BUILD_URL}" >> version.html ; \
|
||||
aws s3 sync . s3://${WEBSITE_BUCKET} --profile terraform --delete ; \
|
||||
aws cloudfront create-invalidation --distribution-id ${CF_DISTRIBUTION_ID} --profile terraform --path "/*" ;
|
||||
.PHONY: sync-docs
|
||||
|
||||
###############################################################################
|
||||
### Docker image ###
|
||||
###############################################################################
|
||||
|
||||
build-docker: build-linux
|
||||
cp $(OUTPUT) DOCKER/tendermint
|
||||
cp $(BUILDDIR)/tendermint DOCKER/tendermint
|
||||
docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
|
||||
rm -rf DOCKER/tendermint
|
||||
.PHONY: build-docker
|
||||
@@ -264,3 +291,17 @@ endif
|
||||
contract-tests:
|
||||
dredd
|
||||
.PHONY: contract-tests
|
||||
|
||||
clean:
|
||||
rm -rf $(CURDIR)/artifacts/ $(BUILDDIR)/
|
||||
|
||||
build-reproducible:
|
||||
docker rm latest-build || true
|
||||
docker run --volume=$(CURDIR):/sources:ro \
|
||||
--env TARGET_PLATFORMS='linux/amd64 linux/arm64 darwin/amd64 windows/amd64' \
|
||||
--env APP=tendermint \
|
||||
--env COMMIT=$(shell git rev-parse --short=8 HEAD) \
|
||||
--env VERSION=$(shell git describe --tags) \
|
||||
--name latest-build cosmossdk/rbuilder:latest
|
||||
docker cp -a latest-build:/home/builder/artifacts/ $(CURDIR)/
|
||||
.PHONY: build-reproducible
|
||||
|
||||
158 PHILOSOPHY.md
@@ -1,158 +0,0 @@
|
||||
# Design goals
|
||||
|
||||
The design goals for Tendermint (and the SDK and related libraries) are:
|
||||
|
||||
* Simplicity and Legibility
|
||||
* Parallel performance, namely ability to utilize multicore architecture
|
||||
* Ability to evolve the codebase bug-free
|
||||
* Debuggability
|
||||
* Complete correctness that considers all edge cases, especially in concurrency
|
||||
* Future-proof modular architecture, message protocol, APIs, and encapsulation
|
||||
|
||||
|
||||
## Justification
|
||||
|
||||
Legibility is key to maintaining bug-free software as it evolves toward more
|
||||
optimizations, more ease of debugging, and additional features.
|
||||
|
||||
It is too easy to introduce bugs over time by replacing lines of code with
ones that may panic, which is why, ideally, locks should be released in defer
statements.
|
||||
|
||||
For example,
|
||||
|
||||
```go
|
||||
func (obj *MyObj) something() {
|
||||
mtx.Lock()
|
||||
obj.something = other
|
||||
mtx.Unlock()
|
||||
}
|
||||
```
|
||||
|
||||
It is too easy to refactor the codebase in the future to replace `other` with
|
||||
`other.String()` for example, and this may introduce a bug that causes a
|
||||
deadlock. So as much as reasonably possible, we need to be using defer
|
||||
statements, even though it introduces additional overhead.
|
||||
|
||||
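For illustration, here is the deferred-unlock form of the snippet above (names adjusted slightly so the example compiles on its own):

```go
package main

import "sync"

var mtx sync.Mutex

type MyObj struct {
	something string
}

// setSomething mirrors the snippet above, but releases the lock in a defer,
// so it stays deadlock-free even if the body later gains a call that can
// panic or an early return.
func (obj *MyObj) setSomething(other string) {
	mtx.Lock()
	defer mtx.Unlock()

	obj.something = other
}

func main() {
	obj := &MyObj{}
	obj.setSomething("other")
}
```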
If it is necessary to optimize the unlocking of mutex locks, the solution is
|
||||
more modularity via smaller functions, so that defer'd unlocks are scoped
|
||||
within a smaller function.
|
||||
|
||||
Similarly, idiomatic for-loops should always be preferred over those that use
|
||||
custom counters, because it is too easy to evolve the body of a for-loop to
|
||||
become more complicated over time, and it becomes more and more difficult to
|
||||
assess the correctness of such a for-loop by visual inspection.
|
||||
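A small, self-contained illustration of the difference (hypothetical names, not taken from the codebase):

```go
package main

import "fmt"

func main() {
	txs := []string{"tx1", "tx2", "tx3"}

	// Idiomatic: the range clause owns the counter, so the body can grow
	// without risking a skipped or repeated element.
	for i, tx := range txs {
		fmt.Println(i, tx)
	}

	// Custom counter: ordinary mutable state that every future edit to the
	// loop body has to keep correct by hand.
	i := 0
	for i < len(txs) {
		fmt.Println(i, txs[i])
		i++
	}
}
```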
|
||||
|
||||
## On performance
|
||||
|
||||
It doesn't matter that alternative implementations might be 2x or 3x more
performant if the software doesn't work, deadlocks, or cannot be debugged. By
taking advantage of multicore concurrency, the Tendermint implementation will
be within an order of magnitude of what is theoretically possible. The design
philosophy of Tendermint, and the choice of Go as the implementation language,
is intended to make the Tendermint implementation the standard specification
for concurrent BFT software.
|
||||
|
||||
By focusing on the message protocols (e.g. ABCI, p2p messages), and
|
||||
encapsulation e.g. IAVL module, (relatively) independent reactors, we are both
|
||||
implementing a standard implementation to be used as the specification for
|
||||
future implementations in more optimizable languages like Rust, Java, and C++;
|
||||
as well as creating sufficiently performant software. Tendermint Core will
|
||||
never be as fast as future implementations of the Tendermint Spec, because Go
|
||||
isn't designed to be as fast as possible. The advantage of using Go is that we
|
||||
can develop the whole stack of modular components **faster** than in other
|
||||
languages.
|
||||
|
||||
Furthermore, the real bottleneck is in the application layer, and it isn't
|
||||
necessary to support more than a sufficiently decentralized set of validators
|
||||
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).
|
||||
|
||||
Instead of optimizing Tendermint performance down to the metal, let's focus on
optimizing other matters, namely the ability to ship feature-complete software
|
||||
that works well enough, can be debugged and maintained, and can serve as a spec
|
||||
for future implementations.
|
||||
|
||||
|
||||
## On encapsulation
|
||||
|
||||
In order to create maintainable, forward-optimizable software, it is critical
|
||||
to develop well-encapsulated objects that have well understood properties, and
|
||||
to re-use these easy-to-use-correctly components as building blocks for further
|
||||
encapsulated meta-objects.
|
||||
|
||||
For example, mutexes are cheap enough for Tendermint's design goals when there
|
||||
isn't goroutine contention, so it is encouraged to create concurrency safe
|
||||
structures with struct-level mutexes. If they are used in the context of
|
||||
non-concurrent logic, then the performance is good enough. If they are used in
|
||||
the context of concurrent logic, then it will still perform correctly.
|
||||
|
||||
Examples of this design principle can be seen in the types.ValidatorSet struct,
|
||||
and the rand.Rand struct. It's one single struct declaration that can be used
|
||||
in both concurrent and non-concurrent logic, and due to its well encapsulation,
|
||||
it's easy to get the usage of the mutex right.
|
||||
|
||||
### example: rand.Rand
|
||||
|
||||
`The default Source is safe for concurrent use by multiple goroutines, but
|
||||
Sources created by NewSource are not`. The reason why the default
|
||||
package-level source is safe for concurrent use is because it is protected (see
|
||||
`lockedSource` in <https://golang.org/src/math/rand/rand.go>).
|
||||
|
||||
But we shouldn't rely on the global source, we should be creating our own
|
||||
Rand/Source instances and using them, especially for determinism in testing.
|
||||
So it is reasonable to have rand.Rand be protected by a mutex. Whether we want
|
||||
our own implementation of Rand is another question, but the answer there is
|
||||
also in the affirmative. Sometimes you want to know where Rand is being used
|
||||
in your code, so it becomes a simple matter of dropping in a log statement to
|
||||
inject inspectability into Rand usage. Also, it is nice to be able to extend
|
||||
the functionality of Rand with custom methods. For these reasons, and for the
|
||||
reasons outlined in this design philosophy document, we should
|
||||
continue to use the rand.Rand object, with mutex protection.
|
||||
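A minimal sketch of the pattern being described, using a hypothetical wrapper type (not the actual Tendermint implementation):

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
	"time"
)

// Rand wraps math/rand.Rand with a struct-level mutex so a single instance
// can be shared by concurrent and non-concurrent callers alike.
type Rand struct {
	mtx  sync.Mutex
	rand *rand.Rand
}

func NewRand() *Rand {
	return &Rand{rand: rand.New(rand.NewSource(time.Now().UnixNano()))}
}

func (r *Rand) Int63() int64 {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.rand.Int63()
}

func main() {
	r := NewRand()
	fmt.Println(r.Int63())
}
```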
|
||||
Another key aspect of good encapsulation is the choice of exposed vs unexposed
|
||||
methods. It should be clear to the reader of the code, which methods are
|
||||
intended to be used in what context, and what safe usage is. Part of this is
|
||||
solved by hiding methods via unexported methods. Another part of this is
|
||||
naming conventions on the methods (e.g. underscores) with good documentation,
|
||||
and code organization. If there are too many exposed methods and it isn't
|
||||
clear what methods have what side effects, then there is something wrong about
|
||||
the design of abstractions that should be revisited.
|
||||
|
||||
|
||||
## On concurrency
|
||||
|
||||
In order for Tendermint to remain relevant in the years to come, it is vital
|
||||
for Tendermint to take advantage of multicore architectures. Due to the nature
|
||||
of the problem, namely consensus across a concurrent p2p gossip network, and to
|
||||
handle RPC requests for a large number of consuming subscribers, it is
|
||||
unavoidable for Tendermint development to require expertise in concurrency
|
||||
design, especially when it comes to the reactor design, and also for RPC
|
||||
request handling.
|
||||
|
||||
|
||||
# Guidelines
|
||||
|
||||
Here are some guidelines for designing for (sufficient) performance and concurrency:
|
||||
|
||||
* Mutex locks are cheap enough when there isn't contention.
|
||||
* Do not optimize code without analytical or observed proof that it is in a hot path.
|
||||
* Don't over-use channels when mutex locks w/ encapsulation are sufficient.
|
||||
* The need to drain channels is often a hint of unconsidered edge cases.
|
||||
* The creation of O(N) one-off goroutines is generally technical debt that
|
||||
needs to get addressed sooner than later. Avoid creating too many
|
||||
goroutines as a patch around incomplete concurrency design, or at least be
|
||||
aware of the debt and do not invest in the debt. On the other hand, Tendermint
|
||||
is designed to have a limited number of peers (e.g. 10 or 20), so the creation
|
||||
of O(C) goroutines per O(P) peers is still O(C\*P=constant).
|
||||
* Use defer statements to unlock as much as possible. If you want to unlock sooner,
|
||||
try to create more modular functions that do make use of defer statements.
|
||||
|
||||
# Mantras
|
||||
|
||||
* Premature optimization kills
|
||||
* Readability is paramount
|
||||
* Beautiful is better than fast.
|
||||
* In the face of ambiguity, refuse the temptation to guess.
|
||||
* In the face of bugs, refuse the temptation to cover the bug.
|
||||
* There should be one-- and preferably only one --obvious way to do it.
|
||||
34 README.md
@@ -8,15 +8,15 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|
||||
|
||||
[](https://github.com/tendermint/tendermint/releases/latest)
|
||||
[](https://pkg.go.dev/github.com/tendermint/tendermint)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/AzefAFd)
|
||||
[](https://github.com/moovweb/gvm)
|
||||
[](https://discord.gg/vcExX9T)
|
||||
[](https://github.com/tendermint/tendermint/blob/master/LICENSE)
|
||||
[](https://github.com/tendermint/tendermint)
|
||||
[](https://sourcegraph.com/github.com/tendermint/tendermint?badge)
|
||||
|
||||
| Branch | Tests | Coverage | Linting |
|
||||
| ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------------------------------- |
|
||||
| master | [](https://circleci.com/gh/tendermint/tendermint/tree/master) </br>  | [](https://codecov.io/gh/tendermint/tendermint) |  |
|
||||
| Branch | Tests | Coverage | Linting |
|
||||
|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
|
||||
| master |  | [](https://codecov.io/gh/tendermint/tendermint) |  |
|
||||
|
||||
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
|
||||
and securely replicates it on many machines.
|
||||
@@ -36,19 +36,22 @@ However, we are still making breaking changes to the protocol and the APIs and h
|
||||
See below for more details about [versioning](#versioning).
|
||||
|
||||
In any case, if you intend to run Tendermint in production, we're happy to help. You can
|
||||
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/AzefAFd).
|
||||
contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T).
|
||||
|
||||
## Security
|
||||
|
||||
To report a security vulnerability, see our [bug bounty
|
||||
program](https://hackerone.com/tendermint).
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md)
|
||||
For examples of the kinds of bugs we're looking for, see [our security policy](SECURITY.md).
|
||||
|
||||
We also maintain a dedicated mailing list for security updates. We will only ever use this mailing list
|
||||
to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe [here](http://eepurl.com/gZ5hQD).
|
||||
|
||||
## Minimum requirements
|
||||
|
||||
| Requirement | Notes |
|
||||
| ----------- | ---------------- |
|
||||
| Go version | Go1.14 or higher |
|
||||
|-------------|------------------|
|
||||
| Go version | Go1.15 or higher |
|
||||
|
||||
## Documentation
|
||||
|
||||
@@ -93,7 +96,6 @@ CHANGELOG even if they don't lead to MINOR version bumps:
|
||||
- crypto
|
||||
- config
|
||||
- libs
|
||||
- bech32
|
||||
- bits
|
||||
- bytes
|
||||
- json
|
||||
@@ -155,4 +157,14 @@ Additional tooling can be found in [/docs/tools](/docs/tools).
|
||||
- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
|
||||
- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
|
||||
- [Original Whitepaper: "Tendermint: Consensus Without Mining"](https://tendermint.com/static/docs/tendermint.pdf)
|
||||
- [Blog](https://blog.cosmos.network/tendermint/home)
|
||||
- [Tendermint Core Blog](https://medium.com/tendermint/tagged/tendermint-core)
|
||||
- [Cosmos Blog](https://blog.cosmos.network/tendermint/home)
|
||||
|
||||
## Join us!
|
||||
|
||||
Tendermint Core is maintained by [Interchain GmbH](https://interchain.berlin).
|
||||
If you'd like to work full-time on Tendermint Core, [we're hiring](https://interchain-gmbh.breezy.hr/p/682fb7e8a6f601-software-engineer-tendermint-core)!
|
||||
|
||||
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
|
||||
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
|
||||
that also maintains [tendermint.com](https://tendermint.com).
|
||||
176 UPGRADING.md
@@ -2,71 +2,104 @@
|
||||
|
||||
This guide provides instructions for upgrading to specific versions of Tendermint Core.
|
||||
|
||||
## Unreleased
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid errors caused by unimplemented changes.
|
||||
|
||||
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in early ABCI implementations.
|
||||
|
||||
* Messages are written to a byte stream using uint64 length delimiters instead of int64.
|
||||
|
||||
### Config Changes
|
||||
|
||||
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
|
||||
|
||||
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node, make sure
|
||||
you have updated all the variables in your `config.toml` file.
|
||||
|
||||
### CLI Changes
|
||||
|
||||
* If you had previously used `tendermint gen_node_key` to generate a new node
|
||||
key, keep in mind that it no longer saves the output to a file. You can use
|
||||
`tendermint init` or pipe the output of `tendermint gen_node_key` to
|
||||
`$TMHOME/config/node_key.json`:
|
||||
|
||||
```
|
||||
$ tendermint gen_node_key > $TMHOME/config/node_key.json
|
||||
```
|
||||
|
||||
* CLI commands and flags are all now hyphen-case instead of snake_case.
|
||||
Make sure to adjust any scripts that call a CLI command using snake_case.
|
||||
## v0.34.0
|
||||
|
||||
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
|
||||
This release is not compatible with previous blockchains due to changes to
|
||||
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
|
||||
|
||||
Note that Tendermint 0.34 also requires Go 1.15 or higher.
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
* New ABCI methods (`ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk`)
|
||||
were added to support the new State Sync feature.
|
||||
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
|
||||
new nodes are able to join a network in a matter of seconds.
|
||||
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
|
||||
if you want to learn more about State Sync, or if you'd like your application to use it.
|
||||
(If you don't want to support State Sync in your application, you can just implement these new
|
||||
ABCI methods as no-ops, leaving them empty.)
|
||||
* The `ABCIVersion` is now `0.17.0`.
|
||||
|
||||
* `KV.Pair` has been replaced with `abci.EventAttribute`. The `EventAttribute.Index` field
|
||||
* New ABCI methods (`ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk`)
|
||||
were added to support the new State Sync feature.
|
||||
Previously, syncing a new node to a preexisting network could take days; but with State Sync,
|
||||
new nodes are able to join a network in a matter of seconds.
|
||||
Read [the spec](https://docs.tendermint.com/master/spec/abci/apps.html#state-sync)
|
||||
if you want to learn more about State Sync, or if you'd like your application to use it.
|
||||
(If you don't want to support State Sync in your application, you can just implement these new
|
||||
ABCI methods as no-ops, leaving them empty.)
|
||||
|
||||
* `KV.Pair` has been replaced with `abci.EventAttribute`. The `EventAttribute.Index` field
|
||||
allows ABCI applications to dictate which events should be indexed.
|
||||
|
||||
* The blockchain can now start from an arbitrary initial height,
|
||||
* The blockchain can now start from an arbitrary initial height,
|
||||
provided to the application via `RequestInitChain.InitialHeight`.
|
||||
|
||||
* ABCI evidence type is now an enum with two recognized types of evidence:
|
||||
`DUPLICATE_VOTE` and `LIGHT_CLIENT_ATTACK`.
|
||||
Applications should be able to handle these evidence types
|
||||
* ABCI evidence type is now an enum with two recognized types of evidence:
|
||||
`DUPLICATE_VOTE` and `LIGHT_CLIENT_ATTACK`.
|
||||
Applications should be able to handle these evidence types
|
||||
(i.e., through slashing or other accountability measures).
|
||||
|
||||
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/crypto/keys.proto#L13-L15)
|
||||
(used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
|
||||
Note that since Tendermint only supports ed25519 validator keys, there's only one
|
||||
* The [`PublicKey` type](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/crypto/keys.proto#L13-L15)
|
||||
(used in ABCI as part of `ValidatorUpdate`) now uses a `oneof` protobuf type.
|
||||
Note that since Tendermint only supports ed25519 validator keys, there's only one
|
||||
option in the `oneof`. For more, see "Protocol Buffers," below.
|
||||
|
||||
* The field `Proof`, on the ABCI type `ResponseQuery`, is now named `ProofOps`.
|
||||
For more, see "Crypto," below.
|
||||
* The field `Proof`, on the ABCI type `ResponseQuery`, is now named `ProofOps`.
|
||||
For more, see "Crypto," below.
|
||||
|
||||
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was used in early ABCI implementations.
|
||||
|
||||
### P2P Protocol
|
||||
|
||||
The default codec is now proto3, not amino. The schema files can be found in the `/proto`
|
||||
directory. For more, see "Protobuf," below.
|
||||
directory. For more, see "Protobuf," below.
|
||||
|
||||
### Blockchain Protocol
|
||||
|
||||
* `Header#LastResultsHash` previously was the root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data)` responses.
|
||||
As of 0.34,`Header#LastResultsHash` is now the root hash of a Merkle tree built from:
|
||||
* `BeginBlock#Events`
|
||||
* Root hash of a Merkle tree built from `ResponseDeliverTx(Code, Data,
|
||||
GasWanted, GasUsed, Events)` responses
|
||||
* `BeginBlock#Events`
|
||||
* `Header#LastResultsHash`, which is the root hash of a Merkle tree built from
|
||||
`ResponseDeliverTx(Code, Data)` as of v0.34 also includes `GasWanted` and `GasUsed`
|
||||
fields.
|
||||
|
||||
* Merkle hashes of empty trees previously returned nothing, but now return the hash of an empty input,
|
||||
to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).
|
||||
to conform with [RFC-6962](https://tools.ietf.org/html/rfc6962).
|
||||
This mainly affects `Header#DataHash`, `Header#LastResultsHash`, and
|
||||
`Header#EvidenceHash`, which are often empty. Non-empty hashes can also be affected, e.g. if their
|
||||
inputs depend on other (empty) Merkle hashes, giving different results.
|
||||
|
||||
### Transaction Indexing
|
||||
|
||||
Tendermint now relies on the application to tell it which transactions to index. This means that
|
||||
in the `config.toml`, generated by Tendermint, there is no longer a way to specify which
|
||||
transactions to index. `tx.height` & `tx.hash` will always be indexed when using the `kv` indexer.
|
||||
Tendermint now relies on the application to tell it which transactions to index. This means that
|
||||
in the `config.toml`, generated by Tendermint, there is no longer a way to specify which
|
||||
transactions to index. `tx.height` and `tx.hash` will always be indexed when using the `kv` indexer.
|
||||
|
||||
Applications must now choose to either a) enable indexing for all transactions, or
|
||||
Applications must now choose to either a) enable indexing for all transactions, or
|
||||
b) allow node operators to decide which transactions to index.
|
||||
Applications can notify Tendermint to index a specific transaction by setting
|
||||
Applications can notify Tendermint to index a specific transaction by setting
|
||||
`Index: bool` to `true` in the Event Attribute:
|
||||
|
||||
```go
|
||||
@@ -82,19 +115,19 @@ Applications can notify Tendermint to index a specific transaction by setting
|
||||
|
||||
### Protocol Buffers
|
||||
|
||||
Tendermint 0.34 replaces Amino with Protocol Buffers for encoding.
|
||||
This migration is extensive and results in a number of changes, however,
|
||||
Tendermint 0.34 replaces Amino with Protocol Buffers for encoding.
|
||||
This migration is extensive and results in a number of changes, however,
|
||||
Tendermint only uses the types generated from Protocol Buffers for disk and
|
||||
wire serialization.
|
||||
wire serialization.
|
||||
**This means that these changes should not affect you as a Tendermint user.**
|
||||
|
||||
However, Tendermint users and contributors may note the following changes:
|
||||
|
||||
* Directory layout changes: All proto files have been moved under one directory, `/proto`.
|
||||
This is in line with the recommended file layout by [Buf](https://buf.build).
|
||||
* Directory layout changes: All proto files have been moved under one directory, `/proto`.
|
||||
This is in line with the recommended file layout by [Buf](https://buf.build).
|
||||
For more, see the [Buf documentation](https://buf.build/docs/lint-checkers#file_layout).
|
||||
* ABCI Changes: As noted in the "ABCI Changes" section above, the `PublicKey` type now uses
|
||||
a `oneof` type.
|
||||
* ABCI Changes: As noted in the "ABCI Changes" section above, the `PublicKey` type now uses
|
||||
a `oneof` type.
|
||||
|
||||
For more on the Protobuf changes, please see our [blog post on this migration](https://medium.com/tendermint/tendermint-0-34-protocol-buffers-and-you-8c40558939ae).
|
||||
|
||||
@@ -108,36 +141,33 @@ Tendermint 0.34 includes new and updated consensus parameters.
|
||||
|
||||
#### Evidence Parameters
|
||||
|
||||
* `MaxNum`, which caps the total amount of evidence by an absolute number. The default is 50.
|
||||
* `MaxBytes`, which caps the total amount of evidence. The default is 1048576 (1 MB).
|
||||
|
||||
### Crypto
|
||||
|
||||
#### Keys
|
||||
|
||||
* Keys no longer include a type prefix. For example, ed25519 pubkeys have been renamed from
|
||||
`PubKeyEd25519` to `PubKey`. This reduces stutter (e.g., `ed25519.PubKey`).
|
||||
* Keys no longer include a type prefix. For example, ed25519 pubkeys have been renamed from
|
||||
`PubKeyEd25519` to `PubKey`. This reduces stutter (e.g., `ed25519.PubKey`).
|
||||
* Keys are now byte slices (`[]byte`) instead of byte arrays (`[<size>]byte`).
|
||||
* The multisig functionality that was previously in Tendermint now has
|
||||
a new home within the Cosmos SDK:
|
||||
* The multisig functionality that was previously in Tendermint now has
|
||||
a new home within the Cosmos SDK:
|
||||
[`cosmos/cosmos-sdk/types/multisig`](https://github.com/cosmos/cosmos-sdk/blob/master/crypto/types/multisig/multisignature.go).
|
||||
* Similarly, secp256k1 has been removed from the Tendermint repo.
|
||||
There is still [a secp256k1 implementation in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/tree/443e0c1f89bd3730a731aea30453bd732f7efa35/crypto/keys/secp256k1),
|
||||
and we recommend you use that package for all your secp256k1 needs.
|
||||
|
||||
#### `merkle` Package
|
||||
|
||||
* `SimpleHashFromMap()` and `SimpleProofsFromMap()` were removed.
|
||||
* The prefix `Simple` has been removed. (For example, `SimpleProof` is now called `Proof`.)
|
||||
* All protobuf messages have been moved to the `/proto` directory.
|
||||
* The protobuf message `Proof` that contained multiple ProofOp's has been renamed to `ProofOps`.
|
||||
As noted above, this affects the ABCI type `ResponseQuery`:
|
||||
* The prefix `Simple` has been removed. (For example, `SimpleProof` is now called `Proof`.)
|
||||
* All protobuf messages have been moved to the `/proto` directory.
|
||||
* The protobuf message `Proof` that contained multiple ProofOp's has been renamed to `ProofOps`.
|
||||
As noted above, this affects the ABCI type `ResponseQuery`:
|
||||
The field that was named Proof is now named `ProofOps`.
|
||||
* `HashFromByteSlices` and `ProofsFromByteSlices` now return a hash for empty inputs, to conform with
|
||||
[RFC-6962](https://tools.ietf.org/html/rfc6962).
|
||||
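For instance (a sketch; the function name is the one referenced above, and the import path is assumed from this repository):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	// As of 0.34 an empty input yields the RFC 6962 empty-tree hash
	// (SHA-256 of the empty string) rather than an empty result.
	fmt.Printf("%X\n", merkle.HashFromByteSlices(nil))
}
```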
|
||||
### `libs` Package
|
||||
|
||||
The `bech32` package has moved to the Cosmos SDK:
|
||||
The `bech32` package has moved to the Cosmos SDK:
|
||||
[`cosmos/cosmos-sdk/types/bech32`](https://github.com/cosmos/cosmos-sdk/tree/4173ea5ebad906dd9b45325bed69b9c655504867/types/bech32).
|
||||
|
||||
### CLI
|
||||
@@ -147,40 +177,56 @@ See [the docs](https://docs.tendermint.com/master/tendermint-core/light-client-p
|
||||
|
||||
### Light Client
|
||||
|
||||
We have a new, rewritten light client! You can
|
||||
We have a new, rewritten light client! You can
|
||||
[read more](https://medium.com/tendermint/everything-you-need-to-know-about-the-tendermint-light-client-f80d03856f98)
|
||||
about the justifications and details behind this change.
|
||||
about the justifications and details behind this change.
|
||||
|
||||
Other user-relevant changes include:
|
||||
|
||||
* The old `lite` package was removed; the new light client uses the `light` package.
|
||||
* The `Verifier` was broken up into two pieces:
|
||||
* Core verification logic (pure `VerifyX` functions)
|
||||
* The `Verifier` was broken up into two pieces:
|
||||
* Core verification logic (pure `VerifyX` functions)
|
||||
* `Client` object, which represents the complete light client
|
||||
* The RPC client can be found in the `/rpc` directory.
|
||||
* The new light client stores headers and validator sets as `LightBlock`s
|
||||
* The RPC client can be found in the `/rpc` directory.
|
||||
* The HTTP(S) proxy is located in the `/proxy` directory.
|
||||
|
||||
### `state` Package
|
||||
|
||||
* A new field `State.InitialHeight` has been added to record the initial chain height, which must be `1`
|
||||
(not `0`) if starting from height `1`. This can be configured via the genesis field `initial_height`.
|
||||
* The `state` package now has a `Store` interface. All functions in
|
||||
[state/store.go](https://github.com/tendermint/tendermint/blob/56911ee35298191c95ef1c7d3d5ec508237aaff4/state/store.go#L42-L42)
|
||||
* The `state` package now has a `Store` interface. All functions in
|
||||
[state/store.go](https://github.com/tendermint/tendermint/blob/56911ee35298191c95ef1c7d3d5ec508237aaff4/state/store.go#L42-L42)
|
||||
are now part of the interface. The interface returns errors on all methods and can be used by calling `state.NewStore(dbm.DB)`.
|
||||
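A usage sketch under the assumption of a tm-db backend (the database constructor below is illustrative, not prescribed by these release notes):

```go
package main

import (
	"log"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/state"
)

func main() {
	// Assumption: a GoLevelDB-backed database named "state" in ./data.
	db, err := dbm.NewGoLevelDB("state", "./data")
	if err != nil {
		log.Fatal(err)
	}

	// All methods of the new Store interface return errors.
	var store state.Store = state.NewStore(db)
	_ = store
}
```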
|
||||
### `privval` Package
|
||||
|
||||
All requests are now accompanied by the chain ID from the network.
|
||||
This is an optional field and can be ignored by key management systems.
|
||||
It is recommended to check the chain ID if using the same key management system for multiple chains.
|
||||
This is an optional field and can be ignored by key management systems;
|
||||
however, if you are using the same key management system for multiple different
|
||||
blockchains, we recommend that you check the chain ID.
|
||||
|
||||
|
||||
### RPC
|
||||
|
||||
`/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and
|
||||
`/unsafe_write_heap_profile` were removed.
|
||||
For profiling, please use the pprof server, which can
|
||||
be enabled through `--rpc.pprof_laddr=X` flag or `pprof_laddr=X` config setting
|
||||
in the rpc section.
|
||||
* `/unsafe_start_cpu_profiler`, `/unsafe_stop_cpu_profiler` and
|
||||
`/unsafe_write_heap_profile` were removed.
|
||||
For profiling, please use the pprof server, which can
|
||||
be enabled through `--rpc.pprof_laddr=X` flag or `pprof_laddr=X` config setting
|
||||
in the rpc section.
|
||||
* The `Content-Type` header returned on RPC calls is now (correctly) set as `application/json`.
|
||||
|
||||
### Version
|
||||
|
||||
Version is now set through Go linker flags (`ld_flags`). Applications using Tendermint as a library should set this at compile time.
|
||||
|
||||
Example:
|
||||
|
||||
```sh
|
||||
go install -mod=readonly -ldflags "-X github.com/tendermint/tendermint/version.TMCoreSemVer=$(go list -m github.com/tendermint/tendermint | sed 's/ /\@/g') -s -w " -trimpath ./cmd
|
||||
```
|
||||
|
||||
Additionally, the exported constant `version.Version` is now `version.TMCoreSemVer`.
|
||||
|
||||
## v0.33.4
|
||||
|
||||
|
||||
66 Vagrantfile (vendored)
@@ -1,66 +0,0 @@
|
||||
# -*- mode: ruby -*-
|
||||
# vi: set ft=ruby :
|
||||
|
||||
Vagrant.configure("2") do |config|
|
||||
config.vm.box = "ubuntu/focal64"
|
||||
|
||||
config.vm.provider "virtualbox" do |v|
|
||||
v.memory = 4096
|
||||
v.cpus = 2
|
||||
end
|
||||
|
||||
config.vm.provision "shell", inline: <<-SHELL
|
||||
apt-get update
|
||||
|
||||
# install base requirements
|
||||
apt-get install -y --no-install-recommends wget curl jq zip \
|
||||
make shellcheck bsdmainutils psmisc
|
||||
apt-get install -y language-pack-en
|
||||
|
||||
# install docker
|
||||
apt-get install -y --no-install-recommends apt-transport-https \
|
||||
ca-certificates \
|
||||
curl \
|
||||
gnupg-agent \
|
||||
software-properties-common
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
|
||||
add-apt-repository \
|
||||
"deb [arch=amd64] https://download.docker.com/linux/ubuntu \
|
||||
$(lsb_release -cs) \
|
||||
stable"
|
||||
apt-get update
|
||||
apt-get install -y docker-ce
|
||||
usermod -aG docker vagrant
|
||||
|
||||
# install go
|
||||
wget -q https://dl.google.com/go/go1.14.linux-amd64.tar.gz
|
||||
tar -xvf go1.14.linux-amd64.tar.gz
|
||||
mv go /usr/local
|
||||
rm -f go1.14.linux-amd64.tar.gz
|
||||
|
||||
# install nodejs (for docs)
|
||||
curl -sL https://deb.nodesource.com/setup_11.x | bash -
|
||||
apt-get install -y nodejs
|
||||
|
||||
# cleanup
|
||||
apt-get autoremove -y
|
||||
|
||||
# set env variables
|
||||
echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile
|
||||
echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile
|
||||
echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile
|
||||
echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile
|
||||
echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile
|
||||
|
||||
mkdir -p /home/vagrant/go/bin
|
||||
mkdir -p /home/vagrant/go/src/github.com/tendermint
|
||||
ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint
|
||||
|
||||
chown -R vagrant:vagrant /home/vagrant/go
|
||||
chown vagrant:vagrant /home/vagrant/.bash_profile
|
||||
|
||||
# get all deps and tools, ready to install/test
|
||||
su - vagrant -c 'source /home/vagrant/.bash_profile'
|
||||
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools'
|
||||
SHELL
|
||||
end
|
||||
@@ -20,7 +20,7 @@ To get up and running quickly, see the [getting started guide](../docs/app-dev/g
|
||||
A detailed description of the ABCI methods and message types is contained in:
|
||||
|
||||
- [The main spec](https://github.com/tendermint/spec/blob/master/spec/abci/abci.md)
|
||||
- [A protobuf file](./types/types.proto)
|
||||
- [A protobuf file](../proto/tendermint/abci/types.proto)
|
||||
- [A Go interface](./types/application.go)
|
||||
|
||||
## Protocol Buffers
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
@@ -14,48 +15,53 @@ const (
|
||||
echoRetryIntervalSeconds = 1
|
||||
)
|
||||
|
||||
//go:generate mockery --case underscore --name Client
|
||||
|
||||
// Client defines an interface for an ABCI client.
|
||||
// All `Async` methods return a `ReqRes` object.
|
||||
//
|
||||
// All `Async` methods return a `ReqRes` object and an error.
|
||||
// All `Sync` methods return the appropriate protobuf ResponseXxx struct and an error.
|
||||
// Note these are client errors, eg. ABCI socket connectivity issues.
|
||||
// Application-related errors are reflected in response via ABCI error codes and logs.
|
||||
//
|
||||
// NOTE these are client errors, eg. ABCI socket connectivity issues.
|
||||
// Application-related errors are reflected in response via ABCI error codes
|
||||
// and logs.
|
||||
type Client interface {
|
||||
service.Service
|
||||
|
||||
SetResponseCallback(Callback)
|
||||
Error() error
|
||||
|
||||
FlushAsync() *ReqRes
|
||||
EchoAsync(msg string) *ReqRes
|
||||
InfoAsync(types.RequestInfo) *ReqRes
|
||||
SetOptionAsync(types.RequestSetOption) *ReqRes
|
||||
DeliverTxAsync(types.RequestDeliverTx) *ReqRes
|
||||
CheckTxAsync(types.RequestCheckTx) *ReqRes
|
||||
QueryAsync(types.RequestQuery) *ReqRes
|
||||
CommitAsync() *ReqRes
|
||||
InitChainAsync(types.RequestInitChain) *ReqRes
|
||||
BeginBlockAsync(types.RequestBeginBlock) *ReqRes
|
||||
EndBlockAsync(types.RequestEndBlock) *ReqRes
|
||||
ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes
|
||||
OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes
|
||||
LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes
|
||||
ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes
|
||||
// Asynchronous requests
|
||||
FlushAsync(context.Context) (*ReqRes, error)
|
||||
EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
|
||||
InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
|
||||
DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
|
||||
CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
|
||||
QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
|
||||
CommitAsync(context.Context) (*ReqRes, error)
|
||||
InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
|
||||
BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
|
||||
EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
|
||||
ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
|
||||
OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
|
||||
LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
|
||||
ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
|
||||
|
||||
FlushSync() error
|
||||
EchoSync(msg string) (*types.ResponseEcho, error)
|
||||
InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
|
||||
SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
|
||||
DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
|
||||
CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
|
||||
QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
|
||||
CommitSync() (*types.ResponseCommit, error)
|
||||
InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
|
||||
BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
|
||||
EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error)
|
||||
ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
||||
OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
||||
LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
||||
ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
||||
// Synchronous requests
|
||||
FlushSync(context.Context) error
|
||||
EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
|
||||
InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
|
||||
DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
|
||||
CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
|
||||
QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
|
||||
CommitSync(context.Context) (*types.ResponseCommit, error)
|
||||
InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
|
||||
BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
|
||||
EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
|
||||
ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
|
||||
OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
|
||||
LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
|
||||
ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
29 abci/client/doc.go (new file)
@@ -0,0 +1,29 @@
|
||||
// Package abcicli provides an ABCI implementation in Go.
|
||||
//
|
||||
// There are 3 clients available:
|
||||
// 1. socket (unix or TCP)
|
||||
// 2. local (in memory)
|
||||
// 3. gRPC
|
||||
//
|
||||
// ## Socket client
|
||||
//
|
||||
// async: the client maintains an internal buffer of a fixed size. When the
|
||||
// buffer becomes full, all Async calls will return an error immediately.
|
||||
//
|
||||
// sync: the client blocks on 1) enqueuing the Sync request 2) enqueuing the
|
||||
// Flush requests 3) waiting for the Flush response
|
||||
//
|
||||
// ## Local client
|
||||
//
|
||||
// async: global mutex is locked during each call (meaning it's not really async!)
|
||||
// sync: global mutex is locked during each call
|
||||
//
|
||||
// ## gRPC client
|
||||
//
|
||||
// async: gRPC is synchronous, but an internal buffer of a fixed size is used
|
||||
// to store responses and later call callbacks (separate goroutine per
|
||||
// response).
|
||||
//
|
||||
// sync: waits for all Async calls to complete (essentially what Flush does in
|
||||
// the socket client) and calls Sync method.
|
||||
package abcicli
|
||||
@@ -1,11 +1,12 @@
|
||||
package abcicli
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
@@ -14,16 +15,14 @@ import (
|
||||
tmsync "github.com/tendermint/tendermint/libs/sync"
|
||||
)
|
||||
|
||||
var _ Client = (*grpcClient)(nil)
|
||||
|
||||
// A stripped copy of the remoteClient that makes
|
||||
// synchronous calls using grpc
|
||||
// A gRPC client.
|
||||
type grpcClient struct {
|
||||
service.BaseService
|
||||
mustConnect bool
|
||||
|
||||
client types.ABCIApplicationClient
|
||||
conn *grpc.ClientConn
|
||||
client types.ABCIApplicationClient
|
||||
conn *grpc.ClientConn
|
||||
chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
|
||||
|
||||
mtx tmsync.Mutex
|
||||
addr string
|
||||
@@ -31,10 +30,29 @@ type grpcClient struct {
|
||||
resCb func(*types.Request, *types.Response) // listens to all callbacks
|
||||
}
|
||||
|
||||
var _ Client = (*grpcClient)(nil)
|
||||
|
||||
// NewGRPCClient creates a gRPC client, which will connect to addr upon the
|
||||
// start. Note Client#Start returns an error if connection is unsuccessful and
|
||||
// mustConnect is true.
|
||||
//
|
||||
// GRPC calls are synchronous, but some callbacks expect to be called
|
||||
// asynchronously (eg. the mempool expects to be able to lock to remove bad txs
|
||||
// from cache). To accommodate, we finish each call in its own go-routine,
|
||||
// which is expensive, but easy - if you want something better, use the socket
|
||||
// protocol! maybe one day, if people really want it, we use grpc streams, but
|
||||
// hopefully not :D
|
||||
func NewGRPCClient(addr string, mustConnect bool) Client {
|
||||
cli := &grpcClient{
|
||||
addr: addr,
|
||||
mustConnect: mustConnect,
|
||||
// Buffering the channel is needed to make calls appear asynchronous,
|
||||
// which is required when the caller makes multiple async calls before
|
||||
// processing callbacks (e.g. due to holding locks). 64 means that a
|
||||
// caller can make up to 64 async calls before a callback must be
|
||||
// processed (otherwise it deadlocks). It also means that we can make 64
|
||||
// gRPC calls while processing a slow callback at the channel head.
|
||||
chReqRes: make(chan *ReqRes, 64),
|
||||
}
|
||||
cli.BaseService = *service.NewBaseService(nil, "grpcClient", cli)
|
||||
return cli
|
||||
@@ -45,9 +63,36 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OnStart() error {
|
||||
if err := cli.BaseService.OnStart(); err != nil {
|
||||
return err
|
||||
}
|
||||
// This processes asynchronous request/response messages and dispatches
|
||||
// them to callbacks.
|
||||
go func() {
|
||||
// Use a separate function to use defer for mutex unlocks (this handles panics)
|
||||
callCb := func(reqres *ReqRes) {
|
||||
cli.mtx.Lock()
|
||||
defer cli.mtx.Unlock()
|
||||
|
||||
reqres.SetDone()
|
||||
reqres.Done()
|
||||
|
||||
// Notify client listener if set
|
||||
if cli.resCb != nil {
|
||||
cli.resCb(reqres.Request, reqres.Response)
|
||||
}
|
||||
|
||||
// Notify reqRes listener if set
|
||||
if cb := reqres.GetCallback(); cb != nil {
|
||||
cb(reqres.Response)
|
||||
}
|
||||
}
|
||||
for reqres := range cli.chReqRes {
|
||||
if reqres != nil {
|
||||
callCb(reqres)
|
||||
} else {
|
||||
cli.Logger.Error("Received nil reqres")
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
RETRY_LOOP:
|
||||
for {
|
||||
conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
|
||||
@@ -80,19 +125,18 @@ RETRY_LOOP:
|
||||
}
|
||||
|
||||
func (cli *grpcClient) OnStop() {
|
||||
cli.BaseService.OnStop()
|
||||
|
||||
if cli.conn != nil {
|
||||
cli.conn.Close()
|
||||
}
|
||||
close(cli.chReqRes)
|
||||
}
|
||||
|
||||
func (cli *grpcClient) StopForError(err error) {
|
||||
cli.mtx.Lock()
|
||||
if !cli.IsRunning() {
|
||||
return
|
||||
}
|
||||
|
||||
cli.mtx.Lock()
|
||||
if cli.err == nil {
|
||||
cli.err = err
|
||||
}
|
||||
@@ -119,248 +163,344 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
// GRPC calls are synchronous, but some callbacks expect to be called asynchronously
|
||||
// (eg. the mempool expects to be able to lock to remove bad txs from cache).
|
||||
// To accommodate, we finish each call in its own go-routine,
|
||||
// which is expensive, but easy - if you want something better, use the socket protocol!
|
||||
// maybe one day, if people really want it, we use grpc streams,
|
||||
// but hopefully not :D
|
||||
|
||||
func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
req := types.ToRequestEcho(msg)
|
||||
res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) FlushAsync() *ReqRes {
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
req := types.ToRequestFlush()
|
||||
res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
|
||||
req := types.ToRequestInfo(params)
|
||||
res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
|
||||
req := types.ToRequestSetOption(params)
|
||||
res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
|
||||
req := types.ToRequestDeliverTx(params)
|
||||
res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
|
||||
req := types.ToRequestCheckTx(params)
|
||||
res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
|
||||
req := types.ToRequestQuery(params)
|
||||
res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CommitAsync() *ReqRes {
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
req := types.ToRequestCommit()
|
||||
res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
|
||||
// NOTE: call is synchronous, use ctx to break early if needed
|
||||
func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
|
||||
req := types.ToRequestInitChain(params)
|
||||
res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
|
||||
res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
|
||||
if err != nil {
|
||||
cli.StopForError(err)
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
|
||||
return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
|
||||
}
|
||||
|
||||
-func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
 	req := types.ToRequestBeginBlock(params)
-	res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
+	res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
+		return nil, err
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
 }

-func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
 	req := types.ToRequestEndBlock(params)
-	res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
+	res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
+		return nil, err
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
 }

-func (cli *grpcClient) ListSnapshotsAsync(params types.RequestListSnapshots) *ReqRes {
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
 	req := types.ToRequestListSnapshots(params)
-	res, err := cli.client.ListSnapshots(context.Background(), req.GetListSnapshots(), grpc.WaitForReady(true))
+	res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
+		return nil, err
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
 }

-func (cli *grpcClient) OfferSnapshotAsync(params types.RequestOfferSnapshot) *ReqRes {
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
 	req := types.ToRequestOfferSnapshot(params)
-	res, err := cli.client.OfferSnapshot(context.Background(), req.GetOfferSnapshot(), grpc.WaitForReady(true))
+	res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
+		return nil, err
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
 }

-func (cli *grpcClient) LoadSnapshotChunkAsync(params types.RequestLoadSnapshotChunk) *ReqRes {
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) LoadSnapshotChunkAsync(
+	ctx context.Context,
+	params types.RequestLoadSnapshotChunk,
+) (*ReqRes, error) {
 	req := types.ToRequestLoadSnapshotChunk(params)
-	res, err := cli.client.LoadSnapshotChunk(context.Background(), req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
+	res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
+		return nil, err
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
 }

-func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshotChunk) *ReqRes {
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) ApplySnapshotChunkAsync(
+	ctx context.Context,
+	params types.RequestApplySnapshotChunk,
+) (*ReqRes, error) {
 	req := types.ToRequestApplySnapshotChunk(params)
-	res, err := cli.client.ApplySnapshotChunk(context.Background(), req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
+	res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
 	if err != nil {
 		cli.StopForError(err)
+		return nil, err
 	}
-	return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}})
+	return cli.finishAsyncCall(
+		ctx,
+		req,
+		&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
+	)
 }

-func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
+// finishAsyncCall creates a ReqRes for an async call, and immediately populates it
+// with the response. We don't complete it until it's been ordered via the channel.
+func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
 	reqres := NewReqRes(req)
-	reqres.Response = res // Set response
-	reqres.Done()         // Release waiters
-	reqres.SetDone()      // so reqRes.SetCallback will run the callback
+	reqres.Response = res
+	select {
+	case cli.chReqRes <- reqres: // use channel for async responses, since they must be ordered
+		return reqres, nil
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	}
+}

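The select in the new finishAsyncCall above is a common Go pattern: hand the result to a single ordered consumer, but give up cleanly if the caller's context is cancelled first. A stand-alone sketch of the same idea, with illustrative names (item, queue) rather than the actual grpcClient internals:

package main

import "context"

type item struct{ payload []byte }

// enqueue submits it to a channel drained by one consumer goroutine, so FIFO
// ordering is preserved, while still respecting caller cancellation.
func enqueue(ctx context.Context, queue chan<- *item, it *item) (*item, error) {
	select {
	case queue <- it:
		return it, nil
	case <-ctx.Done():
		// Never block forever on a full channel once the caller has given up.
		return nil, ctx.Err()
	}
}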
-	// goroutine for callbacks
-	go func() {
-		cli.mtx.Lock()
-		defer cli.mtx.Unlock()
-
-		// Notify client listener if set
-		if cli.resCb != nil {
-			cli.resCb(reqres.Request, res)
-		}
-
-		// Notify reqRes listener if set
-		if cb := reqres.GetCallback(); cb != nil {
-			cb(res)
-		}
-	}()
-
-	return reqres
+// finishSyncCall waits for an async call to complete. It is necessary to call all
+// sync calls asynchronously as well, to maintain call and response ordering via
+// the channel, and this method will wait until the async call completes.
+func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
+	// It's possible that the callback is called twice, since the callback can
+	// be called immediately on SetCallback() in addition to after it has been
+	// set. This is because completing the ReqRes happens in a separate critical
+	// section from the one where the callback is called: there is a race where
+	// SetCallback() is called between completing the ReqRes and dispatching the
+	// callback.
+	//
+	// We also buffer the channel with 1 response, since SetCallback() will be
+	// called synchronously if the reqres is already completed, in which case
+	// it will block on sending to the channel since it hasn't gotten around to
+	// receiving from it yet.
+	//
+	// ReqRes should really handle callback dispatch internally, to guarantee
+	// that it's only called once and avoid the above race conditions.
+	var once sync.Once
+	ch := make(chan *types.Response, 1)
+	reqres.SetCallback(func(res *types.Response) {
+		once.Do(func() {
+			ch <- res
+		})
+	})
+	return <-ch
 }

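The once-guarded, size-1 buffered channel in finishSyncCall generalizes to any callback API that may fire more than once, possibly synchronously during registration. A minimal stand-alone sketch of the same waiting pattern; the register parameter is hypothetical:

package main

import "sync"

// waitForResult registers a callback and blocks until its first invocation.
func waitForResult(register func(cb func(res string))) string {
	var once sync.Once
	ch := make(chan string, 1) // buffered so a synchronous callback cannot deadlock

	register(func(res string) {
		once.Do(func() {
			ch <- res // deliver at most once, even if the callback fires twice
		})
	})
	return <-ch
}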
 //----------------------------------------

-func (cli *grpcClient) FlushSync() error {
+func (cli *grpcClient) FlushSync(ctx context.Context) error {
 	return nil
 }

-func (cli *grpcClient) EchoSync(msg string) (*types.ResponseEcho, error) {
-	reqres := cli.EchoAsync(msg)
-	// StopForError should already have been called if error is set
-	return reqres.Response.GetEcho(), cli.Error()
+func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+	reqres, err := cli.EchoAsync(ctx, msg)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
 }

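With the Sync methods now taking a context, callers decide how long they are willing to block; the remaining Sync methods below follow the same shape as EchoSync. A hedged usage sketch, assuming cli is an already-started client (gRPC, socket, or local) implementing the updated interface:

package main

import (
	"context"
	"fmt"
	"time"

	abcicli "github.com/tendermint/tendermint/abci/client"
)

// ping performs a bounded synchronous Echo round trip.
func ping(cli abcicli.Client) error {
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	echo, err := cli.EchoSync(ctx, "ping")
	if err != nil {
		return err // transport failure, or ctx.Err() if the deadline expired
	}
	fmt.Println("echoed:", echo.Message)
	return nil
}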
func (cli *grpcClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
reqres := cli.InfoAsync(req)
|
||||
return reqres.Response.GetInfo(), cli.Error()
|
||||
func (cli *grpcClient) InfoSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInfo,
|
||||
) (*types.ResponseInfo, error) {
|
||||
reqres, err := cli.InfoAsync(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
reqres := cli.SetOptionAsync(req)
|
||||
return reqres.Response.GetSetOption(), cli.Error()
|
||||
func (cli *grpcClient) DeliverTxSync(
|
||||
ctx context.Context,
|
||||
params types.RequestDeliverTx,
|
||||
) (*types.ResponseDeliverTx, error) {
|
||||
|
||||
reqres, err := cli.DeliverTxAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
reqres := cli.DeliverTxAsync(params)
|
||||
return reqres.Response.GetDeliverTx(), cli.Error()
|
||||
func (cli *grpcClient) CheckTxSync(
|
||||
ctx context.Context,
|
||||
params types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
|
||||
reqres, err := cli.CheckTxAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
reqres := cli.CheckTxAsync(params)
|
||||
return reqres.Response.GetCheckTx(), cli.Error()
|
||||
func (cli *grpcClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
reqres, err := cli.QueryAsync(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
reqres := cli.QueryAsync(req)
|
||||
return reqres.Response.GetQuery(), cli.Error()
|
||||
func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
reqres, err := cli.CommitAsync(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
reqres := cli.CommitAsync()
|
||||
return reqres.Response.GetCommit(), cli.Error()
|
||||
func (cli *grpcClient) InitChainSync(
|
||||
ctx context.Context,
|
||||
params types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
reqres, err := cli.InitChainAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) InitChainSync(params types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
reqres := cli.InitChainAsync(params)
|
||||
return reqres.Response.GetInitChain(), cli.Error()
|
||||
func (cli *grpcClient) BeginBlockSync(
|
||||
ctx context.Context,
|
||||
params types.RequestBeginBlock,
|
||||
) (*types.ResponseBeginBlock, error) {
|
||||
|
||||
reqres, err := cli.BeginBlockAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) BeginBlockSync(params types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
reqres := cli.BeginBlockAsync(params)
|
||||
return reqres.Response.GetBeginBlock(), cli.Error()
|
||||
func (cli *grpcClient) EndBlockSync(
|
||||
ctx context.Context,
|
||||
params types.RequestEndBlock,
|
||||
) (*types.ResponseEndBlock, error) {
|
||||
|
||||
reqres, err := cli.EndBlockAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
reqres := cli.EndBlockAsync(params)
|
||||
return reqres.Response.GetEndBlock(), cli.Error()
|
||||
func (cli *grpcClient) ListSnapshotsSync(
|
||||
ctx context.Context,
|
||||
params types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
reqres, err := cli.ListSnapshotsAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) ListSnapshotsSync(params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
reqres := cli.ListSnapshotsAsync(params)
|
||||
return reqres.Response.GetListSnapshots(), cli.Error()
|
||||
}
|
||||
func (cli *grpcClient) OfferSnapshotSync(
|
||||
ctx context.Context,
|
||||
params types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
func (cli *grpcClient) OfferSnapshotSync(params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
reqres := cli.OfferSnapshotAsync(params)
|
||||
return reqres.Response.GetOfferSnapshot(), cli.Error()
|
||||
reqres, err := cli.OfferSnapshotAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) LoadSnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
reqres := cli.LoadSnapshotChunkAsync(params)
|
||||
return reqres.Response.GetLoadSnapshotChunk(), cli.Error()
|
||||
|
||||
reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *grpcClient) ApplySnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
reqres := cli.ApplySnapshotChunkAsync(params)
|
||||
return reqres.Response.GetApplySnapshotChunk(), cli.Error()
|
||||
|
||||
reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
|
||||
}
|
||||
|
||||
@@ -1,13 +1,13 @@
 package abcicli

 import (
+	"context"
+
 	types "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/service"
 	tmsync "github.com/tendermint/tendermint/libs/sync"
 )

-var _ Client = (*localClient)(nil)
-
 // NOTE: use defer to unlock mutex because Application might panic (e.g., in
 // case of malicious tx or query). It only makes sense for publicly exposed
 // methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
@@ -20,6 +20,12 @@ type localClient struct {
 	Callback
 }

+var _ Client = (*localClient)(nil)
+
+// NewLocalClient creates a local client, which will be directly calling the
+// methods of the given app.
+//
+// Both Async and Sync methods ignore the given context.Context parameter.
 func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client {
 	if mtx == nil {
 		mtx = new(tmsync.Mutex)
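A short usage sketch for the constructor documented above. The application value is a stand-in (types.NewBaseApplication gives the embeddable no-op implementation) and the call pattern is an assumption based on the updated interface, not code from this changeset:

package main

import (
	"context"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func example() error {
	app := types.NewBaseApplication()          // stand-in; any types.Application works
	client := abcicli.NewLocalClient(nil, app) // nil mutex: the constructor allocates one

	// The local client ignores the context, but the signature now matches the
	// socket and gRPC clients, so call sites stay uniform.
	res, err := client.InfoSync(context.Background(), types.RequestInfo{})
	if err != nil {
		return err
	}
	_ = res
	return nil
}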
@@ -43,22 +49,22 @@ func (app *localClient) Error() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *localClient) FlushAsync() *ReqRes {
|
||||
func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
// Do nothing
|
||||
return newLocalReqRes(types.ToRequestFlush(), nil)
|
||||
return newLocalReqRes(types.ToRequestFlush(), nil), nil
|
||||
}
|
||||
|
||||
func (app *localClient) EchoAsync(msg string) *ReqRes {
|
||||
func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
return app.callback(
|
||||
types.ToRequestEcho(msg),
|
||||
types.ToResponseEcho(msg),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
|
||||
func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -66,21 +72,10 @@ func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestInfo(req),
|
||||
types.ToResponseInfo(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
res := app.Application.SetOption(req)
|
||||
return app.callback(
|
||||
types.ToRequestSetOption(req),
|
||||
types.ToResponseSetOption(res),
|
||||
)
|
||||
}
|
||||
|
||||
func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
|
||||
func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -88,10 +83,10 @@ func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestDeliverTx(params),
|
||||
types.ToResponseDeliverTx(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
|
||||
func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -99,10 +94,10 @@ func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestCheckTx(req),
|
||||
types.ToResponseCheckTx(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
|
||||
func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -110,10 +105,10 @@ func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestQuery(req),
|
||||
types.ToResponseQuery(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) CommitAsync() *ReqRes {
|
||||
func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -121,10 +116,10 @@ func (app *localClient) CommitAsync() *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestCommit(),
|
||||
types.ToResponseCommit(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
|
||||
func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -132,10 +127,10 @@ func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestInitChain(req),
|
||||
types.ToResponseInitChain(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
|
||||
func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -143,10 +138,10 @@ func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestBeginBlock(req),
|
||||
types.ToResponseBeginBlock(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
|
||||
func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -154,10 +149,10 @@ func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
|
||||
return app.callback(
|
||||
types.ToRequestEndBlock(req),
|
||||
types.ToResponseEndBlock(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
|
||||
func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -165,10 +160,10 @@ func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqR
|
||||
return app.callback(
|
||||
types.ToRequestListSnapshots(req),
|
||||
types.ToResponseListSnapshots(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
|
||||
func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -176,10 +171,13 @@ func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqR
|
||||
return app.callback(
|
||||
types.ToRequestOfferSnapshot(req),
|
||||
types.ToResponseOfferSnapshot(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
|
||||
func (app *localClient) LoadSnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -187,10 +185,13 @@ func (app *localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChun
|
||||
return app.callback(
|
||||
types.ToRequestLoadSnapshotChunk(req),
|
||||
types.ToResponseLoadSnapshotChunk(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
|
||||
func (app *localClient) ApplySnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -198,20 +199,20 @@ func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotCh
|
||||
return app.callback(
|
||||
types.ToRequestApplySnapshotChunk(req),
|
||||
types.ToResponseApplySnapshotChunk(res),
|
||||
)
|
||||
), nil
|
||||
}
|
||||
|
||||
//-------------------------------------------------------
|
||||
|
||||
func (app *localClient) FlushSync() error {
|
||||
func (app *localClient) FlushSync(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
return &types.ResponseEcho{Message: msg}, nil
|
||||
}
|
||||
|
||||
func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -219,15 +220,11 @@ func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, er
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
func (app *localClient) DeliverTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestDeliverTx,
|
||||
) (*types.ResponseDeliverTx, error) {
|
||||
|
||||
res := app.Application.SetOption(req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -235,7 +232,10 @@ func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.Respon
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
func (app *localClient) CheckTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -243,7 +243,10 @@ func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCh
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
func (app *localClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -251,7 +254,7 @@ func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery,
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -259,7 +262,11 @@ func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
func (app *localClient) InitChainSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -267,7 +274,11 @@ func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.Respon
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
func (app *localClient) BeginBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestBeginBlock,
|
||||
) (*types.ResponseBeginBlock, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -275,7 +286,11 @@ func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.Resp
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
func (app *localClient) EndBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestEndBlock,
|
||||
) (*types.ResponseEndBlock, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -283,7 +298,11 @@ func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.Response
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
func (app *localClient) ListSnapshotsSync(
|
||||
ctx context.Context,
|
||||
req types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -291,7 +310,11 @@ func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*type
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
func (app *localClient) OfferSnapshotSync(
|
||||
ctx context.Context,
|
||||
req types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -300,7 +323,9 @@ func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*type
|
||||
}
|
||||
|
||||
func (app *localClient) LoadSnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
@@ -309,7 +334,9 @@ func (app *localClient) LoadSnapshotChunkSync(
|
||||
}
|
||||
|
||||
func (app *localClient) ApplySnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
|
||||
app.mtx.Lock()
|
||||
defer app.mtx.Unlock()
|
||||
|
||||
|
||||
@@ -1,9 +1,12 @@
|
||||
// Code generated by mockery v1.1.1. DO NOT EDIT.
|
||||
// Code generated by mockery v2.3.0. DO NOT EDIT.
|
||||
|
||||
package mocks
|
||||
|
||||
import (
|
||||
context "context"
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
|
||||
log "github.com/tendermint/tendermint/libs/log"
|
||||
|
||||
mock "github.com/stretchr/testify/mock"
|
||||
@@ -16,29 +19,36 @@ type Client struct {
|
||||
mock.Mock
|
||||
}
|
||||
|
||||
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ApplySnapshotChunkAsync(_a0 types.RequestApplySnapshotChunk) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
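Because mockery regenerated the mock against the new signatures, every expectation must now include the context argument. A hypothetical test sketch using testify; the mocks import path and field values are assumptions for illustration:

package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/abci/types"
)

// TestApplySnapshotChunkMock drives the regenerated mock with (ctx, request) arguments.
func TestApplySnapshotChunkMock(t *testing.T) {
	m := new(mocks.Client)
	m.On("ApplySnapshotChunkSync", mock.Anything, mock.Anything).
		Return(&types.ResponseApplySnapshotChunk{}, nil)

	res, err := m.ApplySnapshotChunkSync(context.Background(), types.RequestApplySnapshotChunk{Index: 0})
	require.NoError(t, err)
	require.NotNil(t, res)
	m.AssertExpectations(t)
}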
|
||||
|
||||
// ApplySnapshotChunkSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseApplySnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
|
||||
@@ -46,8 +56,8 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -55,29 +65,36 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// BeginBlockAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) BeginBlockAsync(_a0 types.RequestBeginBlock) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// BeginBlockAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// BeginBlockSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// BeginBlockSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseBeginBlock
|
||||
if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseBeginBlock)
|
||||
@@ -85,8 +102,8 @@ func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBe
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -94,29 +111,36 @@ func (_m *Client) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBe
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CheckTxAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// CheckTxAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CheckTxSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// CheckTxSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseCheckTx
|
||||
if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseCheckTx)
|
||||
@@ -124,7 +148,30 @@ func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx,
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestCheckTx) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CommitAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -133,29 +180,13 @@ func (_m *Client) CheckTxSync(_a0 types.RequestCheckTx) (*types.ResponseCheckTx,
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// CommitAsync provides a mock function with given fields:
|
||||
func (_m *Client) CommitAsync() *abcicli.ReqRes {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// CommitSync provides a mock function with given fields:
|
||||
func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
|
||||
ret := _m.Called()
|
||||
// CommitSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseCommit
|
||||
if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok {
|
||||
r0 = rf()
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseCommit)
|
||||
@@ -163,8 +194,8 @@ func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func() error); ok {
|
||||
r1 = rf()
|
||||
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -172,29 +203,36 @@ func (_m *Client) CommitSync() (*types.ResponseCommit, error) {
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// DeliverTxAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// DeliverTxAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// DeliverTxSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// DeliverTxSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseDeliverTx
|
||||
if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseDeliverTx)
|
||||
@@ -202,8 +240,8 @@ func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeli
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestDeliverTx) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -211,29 +249,36 @@ func (_m *Client) DeliverTxSync(_a0 types.RequestDeliverTx) (*types.ResponseDeli
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// EchoAsync provides a mock function with given fields: msg
|
||||
func (_m *Client) EchoAsync(msg string) *abcicli.ReqRes {
|
||||
ret := _m.Called(msg)
|
||||
// EchoAsync provides a mock function with given fields: ctx, msg
|
||||
func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(ctx, msg)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(string) *abcicli.ReqRes); ok {
|
||||
r0 = rf(msg)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok {
|
||||
r0 = rf(ctx, msg)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, msg)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// EchoSync provides a mock function with given fields: msg
|
||||
func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
ret := _m.Called(msg)
|
||||
// EchoSync provides a mock function with given fields: ctx, msg
|
||||
func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
ret := _m.Called(ctx, msg)
|
||||
|
||||
var r0 *types.ResponseEcho
|
||||
if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok {
|
||||
r0 = rf(msg)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
|
||||
r0 = rf(ctx, msg)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseEcho)
|
||||
@@ -241,8 +286,8 @@ func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(string) error); ok {
|
||||
r1 = rf(msg)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
|
||||
r1 = rf(ctx, msg)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -250,29 +295,36 @@ func (_m *Client) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// EndBlockAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) EndBlockAsync(_a0 types.RequestEndBlock) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// EndBlockAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// EndBlockSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// EndBlockSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseEndBlock
|
||||
if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseEndBlock)
|
||||
@@ -280,8 +332,8 @@ func (_m *Client) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlo
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -303,42 +355,12 @@ func (_m *Client) Error() error {
|
||||
return r0
|
||||
}
|
||||
|
||||
// FlushAsync provides a mock function with given fields:
|
||||
func (_m *Client) FlushAsync() *abcicli.ReqRes {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// FlushSync provides a mock function with given fields:
|
||||
func (_m *Client) FlushSync() error {
|
||||
ret := _m.Called()
|
||||
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func() error); ok {
|
||||
r0 = rf()
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// InfoAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) InfoAsync(_a0 types.RequestInfo) *abcicli.ReqRes {
|
||||
// FlushAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInfo) *abcicli.ReqRes); ok {
|
||||
if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
@@ -346,24 +368,8 @@ func (_m *Client) InfoAsync(_a0 types.RequestInfo) *abcicli.ReqRes {
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// InfoSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInfo)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok {
|
||||
if rf, ok := ret.Get(1).(func(context.Context) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
@@ -372,29 +378,96 @@ func (_m *Client) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChainAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) InitChainAsync(_a0 types.RequestInitChain) *abcicli.ReqRes {
|
||||
// FlushSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) FlushSync(_a0 context.Context) error {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInitChain) *abcicli.ReqRes); ok {
|
||||
var r0 error
|
||||
if rf, ok := ret.Get(0).(func(context.Context) error); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
r0 = ret.Error(0)
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// InfoAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChainSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// InfoSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseInfo
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInfo)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChainAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// InitChainSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseInitChain
|
||||
if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseInitChain)
|
||||
@@ -402,8 +475,8 @@ func (_m *Client) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInit
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -425,29 +498,36 @@ func (_m *Client) IsRunning() bool {
|
||||
return r0
|
||||
}
|
||||
|
||||
// ListSnapshotsAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ListSnapshotsAsync(_a0 types.RequestListSnapshots) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// ListSnapshotsSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// ListSnapshotsSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseListSnapshots
|
||||
if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseListSnapshots)
|
||||
@@ -455,8 +535,8 @@ func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.Resp
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -464,29 +544,36 @@ func (_m *Client) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.Resp
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) LoadSnapshotChunkAsync(_a0 types.RequestLoadSnapshotChunk) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// LoadSnapshotChunkSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseLoadSnapshotChunk
|
||||
if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
|
||||
@@ -494,8 +581,8 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*ty
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -503,29 +590,36 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*ty
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// OfferSnapshotAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) OfferSnapshotAsync(_a0 types.RequestOfferSnapshot) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// OfferSnapshotSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// OfferSnapshotSync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseOfferSnapshot
|
||||
if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
|
||||
@@ -533,8 +627,8 @@ func (_m *Client) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.Resp
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -575,29 +669,36 @@ func (_m *Client) OnStop() {
|
||||
_m.Called()
|
||||
}
|
||||
|
||||
// QueryAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) QueryAsync(_a0 types.RequestQuery) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
// QueryAsync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestQuery) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// QuerySync provides a mock function with given fields: _a0
|
||||
func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
ret := _m.Called(_a0)
|
||||
// QuerySync provides a mock function with given fields: _a0, _a1
|
||||
func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
ret := _m.Called(_a0, _a1)
|
||||
|
||||
var r0 *types.ResponseQuery
|
||||
if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok {
|
||||
r0 = rf(_a0)
|
||||
if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
|
||||
r0 = rf(_a0, _a1)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseQuery)
|
||||
@@ -605,8 +706,8 @@ func (_m *Client) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0)
|
||||
if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
|
||||
r1 = rf(_a0, _a1)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
@@ -649,45 +750,6 @@ func (_m *Client) SetLogger(_a0 log.Logger) {
|
||||
_m.Called(_a0)
|
||||
}
|
||||
|
||||
// SetOptionAsync provides a mock function with given fields: _a0
|
||||
func (_m *Client) SetOptionAsync(_a0 types.RequestSetOption) *abcicli.ReqRes {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *abcicli.ReqRes
|
||||
if rf, ok := ret.Get(0).(func(types.RequestSetOption) *abcicli.ReqRes); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*abcicli.ReqRes)
|
||||
}
|
||||
}
|
||||
|
||||
return r0
|
||||
}
|
||||
|
||||
// SetOptionSync provides a mock function with given fields: _a0
|
||||
func (_m *Client) SetOptionSync(_a0 types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
ret := _m.Called(_a0)
|
||||
|
||||
var r0 *types.ResponseSetOption
|
||||
if rf, ok := ret.Get(0).(func(types.RequestSetOption) *types.ResponseSetOption); ok {
|
||||
r0 = rf(_a0)
|
||||
} else {
|
||||
if ret.Get(0) != nil {
|
||||
r0 = ret.Get(0).(*types.ResponseSetOption)
|
||||
}
|
||||
}
|
||||
|
||||
var r1 error
|
||||
if rf, ok := ret.Get(1).(func(types.RequestSetOption) error); ok {
|
||||
r1 = rf(_a0)
|
||||
} else {
|
||||
r1 = ret.Error(1)
|
||||
}
|
||||
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// SetResponseCallback provides a mock function with given fields: _a0
|
||||
func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) {
|
||||
_m.Called(_a0)
|
||||
|
||||
@@ -3,6 +3,7 @@ package abcicli
|
||||
import (
|
||||
"bufio"
|
||||
"container/list"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
@@ -18,10 +19,18 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
reqQueueSize = 256 // TODO make configurable
|
||||
flushThrottleMS = 20 // Don't wait longer than...
|
||||
// reqQueueSize is the max number of queued async requests.
|
||||
// (memory: 256MB max assuming 1MB transactions)
|
||||
reqQueueSize = 256
|
||||
// Don't wait longer than...
|
||||
flushThrottleMS = 20
|
||||
)
|
||||
|
||||
type reqResWithContext struct {
|
||||
R *ReqRes
|
||||
C context.Context // if context.Err is not nil, reqRes will be thrown away (ignored)
|
||||
}
|
||||
|
||||
// This is goroutine-safe, but users should beware that the application in
|
||||
// general is not meant to be interfaced with concurrent callers.
|
||||
type socketClient struct {
|
||||
@@ -31,7 +40,7 @@ type socketClient struct {
|
||||
mustConnect bool
|
||||
conn net.Conn
|
||||
|
||||
reqQueue chan *ReqRes
|
||||
reqQueue chan *reqResWithContext
|
||||
flushTimer *timer.ThrottleTimer
|
||||
|
||||
mtx tmsync.Mutex
|
||||
@@ -47,7 +56,7 @@ var _ Client = (*socketClient)(nil)
|
||||
// if it fails to connect.
|
||||
func NewSocketClient(addr string, mustConnect bool) Client {
|
||||
cli := &socketClient{
|
||||
reqQueue: make(chan *ReqRes, reqQueueSize),
|
||||
reqQueue: make(chan *reqResWithContext, reqQueueSize),
|
||||
flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS),
|
||||
mustConnect: mustConnect,
|
||||
|
||||
@@ -123,15 +132,20 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
|
||||
case reqres := <-cli.reqQueue:
|
||||
// cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request)
|
||||
|
||||
cli.willSendReq(reqres)
|
||||
err := types.WriteMessage(reqres.Request, w)
|
||||
if reqres.C.Err() != nil {
|
||||
cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err())
|
||||
continue
|
||||
}
|
||||
|
||||
cli.willSendReq(reqres.R)
|
||||
err := types.WriteMessage(reqres.R.Request, w)
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("write to buffer: %w", err))
|
||||
return
|
||||
}
|
||||
|
||||
// If it's a flush request, flush the current buffer.
|
||||
if _, ok := reqres.Request.Value.(*types.Request_Flush); ok {
|
||||
if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
|
||||
err = w.Flush()
|
||||
if err != nil {
|
||||
cli.stopForError(fmt.Errorf("flush buffer: %w", err))
|
||||
@@ -140,7 +154,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
|
||||
}
|
||||
case <-cli.flushTimer.Ch: // flush queue
|
||||
select {
|
||||
case cli.reqQueue <- NewReqRes(types.ToRequestFlush()):
|
||||
case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
|
||||
default:
|
||||
// Probably will fill the buffer, or retry later.
|
||||
}
|
||||
@@ -221,211 +235,264 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) EchoAsync(msg string) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestEcho(msg))
|
||||
func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
|
||||
}
|
||||
|
||||
func (cli *socketClient) FlushAsync() *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestFlush())
|
||||
func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestFlush())
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoAsync(req types.RequestInfo) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestInfo(req))
|
||||
func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestSetOption(req))
|
||||
func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestDeliverTx(req))
|
||||
func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestCheckTx(req))
|
||||
func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestQuery(req))
|
||||
func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestCommit())
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitAsync() *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestCommit())
|
||||
func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestInitChain(req))
|
||||
func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestBeginBlock(req))
|
||||
func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestEndBlock(req))
|
||||
func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestListSnapshots(req))
|
||||
func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestOfferSnapshot(req))
|
||||
func (cli *socketClient) LoadSnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes {
|
||||
return cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
|
||||
func (cli *socketClient) ApplySnapshotChunkAsync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk,
|
||||
) (*ReqRes, error) {
|
||||
return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) FlushSync() error {
|
||||
reqRes := cli.queueRequest(types.ToRequestFlush())
|
||||
func (cli *socketClient) FlushSync(ctx context.Context) error {
|
||||
reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
|
||||
if err != nil {
|
||||
return queueErr(err)
|
||||
}
|
||||
|
||||
if err := cli.Error(); err != nil {
|
||||
return err
|
||||
}
|
||||
reqRes.Wait() // NOTE: if we don't flush the queue, it's possible to get stuck here
|
||||
return cli.Error()
|
||||
|
||||
gotResp := make(chan struct{})
|
||||
go func() {
|
||||
// NOTE: if we don't flush the queue, it's possible to get stuck here
|
||||
reqRes.Wait()
|
||||
close(gotResp)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-gotResp:
|
||||
return cli.Error()
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
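The select above is the usual "wait for completion, but give up when the context is cancelled" idiom. A self-contained sketch of the same pattern, independent of the ABCI types (all names here are illustrative):

```go
package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// waitWithContext blocks until wg finishes or ctx is cancelled,
// mirroring the select used by the rewritten FlushSync.
func waitWithContext(ctx context.Context, wg *sync.WaitGroup) error {
	done := make(chan struct{})
	go func() {
		wg.Wait()
		close(done)
	}()

	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		wg.Done()
	}()

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	fmt.Println(waitWithContext(ctx, &wg)) // <nil>
}
```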
func (cli *socketClient) EchoSync(msg string) (*types.ResponseEcho, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestEcho(msg))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetEcho(), cli.Error()
|
||||
return reqres.Response.GetEcho(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestInfo(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) InfoSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInfo,
|
||||
) (*types.ResponseInfo, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetInfo(), cli.Error()
|
||||
return reqres.Response.GetInfo(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestSetOption(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) DeliverTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestDeliverTx,
|
||||
) (*types.ResponseDeliverTx, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetSetOption(), cli.Error()
|
||||
return reqres.Response.GetDeliverTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) CheckTxSync(
|
||||
ctx context.Context,
|
||||
req types.RequestCheckTx,
|
||||
) (*types.ResponseCheckTx, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetDeliverTx(), cli.Error()
|
||||
return reqres.Response.GetCheckTx(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestCheckTx(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) QuerySync(
|
||||
ctx context.Context,
|
||||
req types.RequestQuery,
|
||||
) (*types.ResponseQuery, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetCheckTx(), cli.Error()
|
||||
return reqres.Response.GetQuery(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestQuery(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetQuery(), cli.Error()
|
||||
return reqres.Response.GetCommit(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) CommitSync() (*types.ResponseCommit, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestCommit())
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) InitChainSync(
|
||||
ctx context.Context,
|
||||
req types.RequestInitChain,
|
||||
) (*types.ResponseInitChain, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetCommit(), cli.Error()
|
||||
return reqres.Response.GetInitChain(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestInitChain(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) BeginBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestBeginBlock,
|
||||
) (*types.ResponseBeginBlock, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetInitChain(), cli.Error()
|
||||
return reqres.Response.GetBeginBlock(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestBeginBlock(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) EndBlockSync(
|
||||
ctx context.Context,
|
||||
req types.RequestEndBlock,
|
||||
) (*types.ResponseEndBlock, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetBeginBlock(), cli.Error()
|
||||
return reqres.Response.GetEndBlock(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestEndBlock(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) ListSnapshotsSync(
|
||||
ctx context.Context,
|
||||
req types.RequestListSnapshots,
|
||||
) (*types.ResponseListSnapshots, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetEndBlock(), cli.Error()
|
||||
return reqres.Response.GetListSnapshots(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestListSnapshots(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
func (cli *socketClient) OfferSnapshotSync(
|
||||
ctx context.Context,
|
||||
req types.RequestOfferSnapshot,
|
||||
) (*types.ResponseOfferSnapshot, error) {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetListSnapshots(), cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestOfferSnapshot(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetOfferSnapshot(), cli.Error()
|
||||
return reqres.Response.GetOfferSnapshot(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) LoadSnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres.Response.GetLoadSnapshotChunk(), cli.Error()
|
||||
return reqres.Response.GetLoadSnapshotChunk(), nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) ApplySnapshotChunkSync(
|
||||
ctx context.Context,
|
||||
req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
|
||||
reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req))
|
||||
if err := cli.FlushSync(); err != nil {
|
||||
|
||||
reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return reqres.Response.GetApplySnapshotChunk(), cli.Error()
|
||||
return reqres.Response.GetApplySnapshotChunk(), nil
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
|
||||
// queueRequest enqueues req onto the queue. If the queue is full, it either
// returns an error (sync=false) or blocks (sync=true).
//
// When sync=true, ctx can be used to break early. When sync=false, ctx will be
// used later to determine if request should be dropped (if ctx.Err is
// non-nil).
//
// The caller is responsible for checking cli.Error.
|
||||
func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) {
|
||||
reqres := NewReqRes(req)
|
||||
|
||||
// TODO: set cli.err if reqQueue times out
|
||||
cli.reqQueue <- reqres
|
||||
if sync {
|
||||
select {
|
||||
case cli.reqQueue <- &reqResWithContext{R: reqres, C: context.Background()}:
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
} else {
|
||||
select {
|
||||
case cli.reqQueue <- &reqResWithContext{R: reqres, C: ctx}:
|
||||
default:
|
||||
return nil, errors.New("buffer is full")
|
||||
}
|
||||
}
|
||||
|
||||
// Maybe auto-flush, or unset auto-flush
|
||||
switch req.Value.(type) {
|
||||
@@ -435,7 +502,41 @@ func (cli *socketClient) queueRequest(req *types.Request) *ReqRes {
|
||||
cli.flushTimer.Set()
|
||||
}
|
||||
|
||||
return reqres
|
||||
return reqres, nil
|
||||
}
|
||||
|
||||
func (cli *socketClient) queueRequestAsync(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, false)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func (cli *socketClient) queueRequestAndFlushSync(
|
||||
ctx context.Context,
|
||||
req *types.Request,
|
||||
) (*ReqRes, error) {
|
||||
|
||||
reqres, err := cli.queueRequest(ctx, req, true)
|
||||
if err != nil {
|
||||
return nil, queueErr(err)
|
||||
}
|
||||
|
||||
if err := cli.FlushSync(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return reqres, cli.Error()
|
||||
}
|
||||
|
||||
func queueErr(e error) error {
|
||||
return fmt.Errorf("can't queue req: %w", e)
|
||||
}
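With queueing now context-aware, callers can bound how long a Sync call may block on the queue and the flush. A hedged usage sketch against the socket client shown above; the address and a running ABCI server are assumptions:

```go
package main

import (
	"context"
	"fmt"
	"time"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	client := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := client.Start(); err != nil {
		panic(err)
	}
	defer func() { _ = client.Stop() }()

	// Give up if the request cannot be queued and flushed within 5s.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: []byte{0x01}})
	if err != nil {
		panic(err)
	}
	fmt.Println("CheckTx code:", res.Code)
}
```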
func (cli *socketClient) flushQueue() {
|
||||
@@ -453,7 +554,7 @@ LOOP:
|
||||
for {
|
||||
select {
|
||||
case reqres := <-cli.reqQueue:
|
||||
reqres.Done()
|
||||
reqres.R.Done()
|
||||
default:
|
||||
break LOOP
|
||||
}
|
||||
@@ -470,8 +571,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
|
||||
_, ok = res.Value.(*types.Response_Flush)
|
||||
case *types.Request_Info:
|
||||
_, ok = res.Value.(*types.Response_Info)
|
||||
case *types.Request_SetOption:
|
||||
_, ok = res.Value.(*types.Response_SetOption)
|
||||
case *types.Request_DeliverTx:
|
||||
_, ok = res.Value.(*types.Response_DeliverTx)
|
||||
case *types.Request_CheckTx:
|
||||
@@ -504,12 +603,10 @@ func (cli *socketClient) stopForError(err error) {
|
||||
}
|
||||
|
||||
cli.mtx.Lock()
|
||||
if cli.err == nil {
|
||||
cli.err = err
|
||||
}
|
||||
cli.err = err
|
||||
cli.mtx.Unlock()
|
||||
|
||||
cli.Logger.Error(fmt.Sprintf("Stopping abci.socketClient for error: %v", err.Error()))
|
||||
cli.Logger.Info("Stopping abci.socketClient", "reason", err)
|
||||
if err := cli.Stop(); err != nil {
|
||||
cli.Logger.Error("Error stopping abci.socketClient", "err", err)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package abcicli_test
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
@@ -15,6 +16,8 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func TestProperSyncCalls(t *testing.T) {
|
||||
app := slowApp{}
|
||||
|
||||
@@ -33,11 +36,12 @@ func TestProperSyncCalls(t *testing.T) {
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
// This is BeginBlockSync unrolled....
|
||||
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
|
||||
err := c.FlushSync()
|
||||
require.NoError(t, err)
|
||||
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
|
||||
assert.NoError(t, err)
|
||||
err = c.FlushSync(context.Background())
|
||||
assert.NoError(t, err)
|
||||
res := reqres.Response.GetBeginBlock()
|
||||
require.NotNil(t, res)
|
||||
assert.NotNil(t, res)
|
||||
resp <- c.Error()
|
||||
}()
|
||||
|
||||
@@ -68,14 +72,16 @@ func TestHangingSyncCalls(t *testing.T) {
|
||||
resp := make(chan error, 1)
|
||||
go func() {
|
||||
// Start BeginBlock and flush it
|
||||
reqres := c.BeginBlockAsync(types.RequestBeginBlock{})
|
||||
flush := c.FlushAsync()
|
||||
reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
|
||||
assert.NoError(t, err)
|
||||
flush, err := c.FlushAsync(ctx)
|
||||
assert.NoError(t, err)
|
||||
// wait 20 ms for all events to travel socket, but
|
||||
// no response yet from server
|
||||
time.Sleep(20 * time.Millisecond)
|
||||
// kill the server, so the connections break
|
||||
err := s.Stop()
|
||||
require.NoError(t, err)
|
||||
err = s.Stop()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// wait for the response from BeginBlock
|
||||
reqres.Wait()
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
@@ -29,6 +30,8 @@ import (
|
||||
var (
|
||||
client abcicli.Client
|
||||
logger log.Logger
|
||||
|
||||
ctx = context.Background()
|
||||
)
|
||||
|
||||
// flags
|
||||
@@ -148,7 +151,6 @@ func addCommands() {
|
||||
RootCmd.AddCommand(consoleCmd)
|
||||
RootCmd.AddCommand(echoCmd)
|
||||
RootCmd.AddCommand(infoCmd)
|
||||
RootCmd.AddCommand(setOptionCmd)
|
||||
RootCmd.AddCommand(deliverTxCmd)
|
||||
RootCmd.AddCommand(checkTxCmd)
|
||||
RootCmd.AddCommand(commitCmd)
|
||||
@@ -176,7 +178,6 @@ you'd like to run:
|
||||
|
||||
where example.file looks something like:
|
||||
|
||||
set_option serial on
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
deliver_tx 0x00
|
||||
@@ -198,7 +199,7 @@ This command opens an interactive console for running any of the other commands
|
||||
without opening a new connection each time
|
||||
`,
|
||||
Args: cobra.ExactArgs(0),
|
||||
ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
|
||||
ValidArgs: []string{"echo", "info", "deliver_tx", "check_tx", "commit", "query"},
|
||||
RunE: cmdConsole,
|
||||
}
|
||||
|
||||
@@ -216,13 +217,6 @@ var infoCmd = &cobra.Command{
|
||||
Args: cobra.ExactArgs(0),
|
||||
RunE: cmdInfo,
|
||||
}
|
||||
var setOptionCmd = &cobra.Command{
|
||||
Use: "set_option",
|
||||
Short: "set an option on the application",
|
||||
Long: "set an option on the application",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: cmdSetOption,
|
||||
}
|
||||
|
||||
var deliverTxCmd = &cobra.Command{
|
||||
Use: "deliver_tx",
|
||||
@@ -324,7 +318,6 @@ func cmdTest(cmd *cobra.Command, args []string) error {
|
||||
return compose(
|
||||
[]func() error{
|
||||
func() error { return servertest.InitChain(client) },
|
||||
func() error { return servertest.SetOption(client, "serial", "on") },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
func() error { return servertest.DeliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil) },
|
||||
func() error { return servertest.Commit(client, nil) },
|
||||
@@ -439,8 +432,6 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
|
||||
return cmdInfo(cmd, actualArgs)
|
||||
case "query":
|
||||
return cmdQuery(cmd, actualArgs)
|
||||
case "set_option":
|
||||
return cmdSetOption(cmd, actualArgs)
|
||||
default:
|
||||
return cmdUnimplemented(cmd, pArgs)
|
||||
}
|
||||
@@ -464,7 +455,6 @@ func cmdUnimplemented(cmd *cobra.Command, args []string) error {
|
||||
fmt.Printf("%s: %s\n", deliverTxCmd.Use, deliverTxCmd.Short)
|
||||
fmt.Printf("%s: %s\n", queryCmd.Use, queryCmd.Short)
|
||||
fmt.Printf("%s: %s\n", commitCmd.Use, commitCmd.Short)
|
||||
fmt.Printf("%s: %s\n", setOptionCmd.Use, setOptionCmd.Short)
|
||||
fmt.Println("Use \"[command] --help\" for more information about a command.")
|
||||
|
||||
return nil
|
||||
@@ -476,7 +466,7 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
msg = args[0]
|
||||
}
|
||||
res, err := client.EchoSync(msg)
|
||||
res, err := client.EchoSync(ctx, msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -492,7 +482,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 1 {
|
||||
version = args[0]
|
||||
}
|
||||
res, err := client.InfoSync(types.RequestInfo{Version: version})
|
||||
res, err := client.InfoSync(ctx, types.RequestInfo{Version: version})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -504,25 +494,6 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
|
||||
|
||||
const codeBad uint32 = 10
|
||||
|
||||
// Set an option on the application
|
||||
func cmdSetOption(cmd *cobra.Command, args []string) error {
|
||||
if len(args) < 2 {
|
||||
printResponse(cmd, args, response{
|
||||
Code: codeBad,
|
||||
Log: "want at least arguments of the form: <key> <value>",
|
||||
})
|
||||
return nil
|
||||
}
|
||||
|
||||
key, val := args[0], args[1]
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: val})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
printResponse(cmd, args, response{Log: "OK (SetOption doesn't return anything.)"}) // NOTE: Nothing to show...
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append a new tx to application
|
||||
func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 {
|
||||
@@ -536,7 +507,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
|
||||
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -562,7 +533,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
|
||||
res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -577,7 +548,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
|
||||
|
||||
// Get application Merkle root hash
|
||||
func cmdCommit(cmd *cobra.Command, args []string) error {
|
||||
res, err := client.CommitSync()
|
||||
res, err := client.CommitSync(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -602,7 +573,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
resQuery, err := client.QuerySync(types.RequestQuery{
|
||||
resQuery, err := client.QuerySync(ctx, types.RequestQuery{
|
||||
Data: queryBytes,
|
||||
Path: flagPath,
|
||||
Height: int64(flagHeight),
|
||||
|
||||
@@ -24,24 +24,6 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
|
||||
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
|
||||
}
|
||||
|
||||
func (app *Application) SetOption(req types.RequestSetOption) types.ResponseSetOption {
|
||||
key, value := req.Key, req.Value
|
||||
if key == "serial" && value == "on" {
|
||||
app.serial = true
|
||||
} else {
|
||||
/*
|
||||
TODO Panic and have the ABCI server pass an exception.
|
||||
The client can call SetOptionSync() and get an `error`.
|
||||
return types.ResponseSetOption{
|
||||
Error: fmt.Sprintf("Unknown key (%s) or value (%s)", key, value),
|
||||
}
|
||||
*/
|
||||
return types.ResponseSetOption{}
|
||||
}
|
||||
|
||||
return types.ResponseSetOption{}
|
||||
}
|
||||
|
||||
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
if app.serial {
|
||||
if len(req.Tx) > 8 {
|
||||
@@ -69,6 +51,7 @@ func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx
|
||||
Code: code.CodeTypeEncodingError,
|
||||
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
|
||||
}
|
||||
|
||||
tx8 := make([]byte, 8)
|
||||
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
|
||||
txValue := binary.BigEndian.Uint64(tx8)
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package example
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
@@ -13,8 +14,6 @@ import (
|
||||
|
||||
"google.golang.org/grpc"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmnet "github.com/tendermint/tendermint/libs/net"
|
||||
|
||||
@@ -45,7 +44,7 @@ func TestGRPC(t *testing.T) {
|
||||
}
|
||||
|
||||
func testStream(t *testing.T, app types.Application) {
|
||||
numDeliverTxs := 20000
|
||||
const numDeliverTxs = 20000
|
||||
socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
|
||||
defer os.Remove(socketFile)
|
||||
socket := fmt.Sprintf("unix://%v", socketFile)
|
||||
@@ -53,9 +52,8 @@ func testStream(t *testing.T, app types.Application) {
|
||||
// Start the listener
|
||||
server := abciserver.NewSocketServer(socket, app)
|
||||
server.SetLogger(log.TestingLogger().With("module", "abci-server"))
|
||||
if err := server.Start(); err != nil {
|
||||
require.NoError(t, err, "Error starting socket server")
|
||||
}
|
||||
err := server.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := server.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
@@ -65,9 +63,8 @@ func testStream(t *testing.T, app types.Application) {
|
||||
// Connect to the socket
|
||||
client := abcicli.NewSocketClient(socket, false)
|
||||
client.SetLogger(log.TestingLogger().With("module", "abci-client"))
|
||||
if err := client.Start(); err != nil {
|
||||
t.Fatalf("Error starting socket client: %v", err.Error())
|
||||
}
|
||||
err = client.Start()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
if err := client.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
@@ -101,22 +98,24 @@ func testStream(t *testing.T, app types.Application) {
|
||||
}
|
||||
})
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
// Write requests
|
||||
for counter := 0; counter < numDeliverTxs; counter++ {
|
||||
// Send request
|
||||
reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")})
|
||||
_ = reqRes
|
||||
// check err ?
|
||||
_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Sometimes send flush messages
|
||||
if counter%123 == 0 {
|
||||
client.FlushAsync()
|
||||
// check err ?
|
||||
if counter%128 == 0 {
|
||||
err = client.FlushSync(context.Background())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Send final flush message
|
||||
client.FlushAsync()
|
||||
_, err = client.FlushAsync(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
<-done
|
||||
}
|
||||
|
||||
@@ -10,7 +10,7 @@ import (
|
||||
func RandVal(i int) types.ValidatorUpdate {
|
||||
pubkey := tmrand.Bytes(32)
|
||||
power := tmrand.Uint16() + 1
|
||||
v := types.Ed25519ValidatorUpdate(pubkey, int64(power))
|
||||
v := types.UpdateValidator(pubkey, int64(power), "")
|
||||
return v
|
||||
}
|
||||
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
package kvstore
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
@@ -23,6 +24,8 @@ const (
|
||||
testValue = "def"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
|
||||
req := types.RequestDeliverTx{Tx: tx}
|
||||
ar := app.DeliverTx(req)
|
||||
@@ -323,23 +326,23 @@ func runClientTests(t *testing.T, client abcicli.Client) {
|
||||
}
|
||||
|
||||
func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
|
||||
ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
|
||||
ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||
require.NoError(t, err)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
// repeating tx doesn't raise error
|
||||
ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
|
||||
ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
|
||||
require.NoError(t, err)
|
||||
require.False(t, ar.IsErr(), ar)
|
||||
// commit
|
||||
_, err = app.CommitSync()
|
||||
_, err = app.CommitSync(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
info, err := app.InfoSync(types.RequestInfo{})
|
||||
info, err := app.InfoSync(ctx, types.RequestInfo{})
|
||||
require.NoError(t, err)
|
||||
require.NotZero(t, info.LastBlockHeight)
|
||||
|
||||
// make sure query is fine
|
||||
resQuery, err := app.QuerySync(types.RequestQuery{
|
||||
resQuery, err := app.QuerySync(ctx, types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
})
|
||||
@@ -350,7 +353,7 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string)
|
||||
require.EqualValues(t, info.LastBlockHeight, resQuery.Height)
|
||||
|
||||
// make sure proof is fine
|
||||
resQuery, err = app.QuerySync(types.RequestQuery{
|
||||
resQuery, err = app.QuerySync(ctx, types.RequestQuery{
|
||||
Path: "/store",
|
||||
Data: []byte(key),
|
||||
Prove: true,
|
||||
|
||||
@@ -62,10 +62,6 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
|
||||
return res
|
||||
}
|
||||
|
||||
func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) types.ResponseSetOption {
|
||||
return app.app.SetOption(req)
|
||||
}
|
||||
|
||||
// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
|
||||
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
|
||||
// if it starts with "val:", update the validator set
|
||||
@@ -212,7 +208,7 @@ func isValidatorTx(tx []byte) bool {
|
||||
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
|
||||
tx = tx[len(ValidatorSetChangePrefix):]
|
||||
|
||||
//get the pubkey and power
|
||||
// get the pubkey and power
|
||||
pubKeyAndPower := strings.Split(string(tx), "!")
|
||||
if len(pubKeyAndPower) != 2 {
|
||||
return types.ResponseDeliverTx{
|
||||
@@ -238,16 +234,16 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
|
||||
}
|
||||
|
||||
// update
|
||||
return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, power))
|
||||
return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
|
||||
}
|
||||
|
||||
// add, update, or remove a validator
|
||||
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
|
||||
key := []byte("val:" + string(v.PubKey.GetEd25519()))
|
||||
pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("can't decode public key: %w", err))
|
||||
}
|
||||
key := []byte("val:" + string(pubkey.Bytes()))
|
||||
|
||||
if v.Power == 0 {
|
||||
// remove validator
|
||||
|
||||
@@ -200,9 +200,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
|
||||
case *types.Request_Info:
|
||||
res := s.app.Info(*r.Info)
|
||||
responses <- types.ToResponseInfo(res)
|
||||
case *types.Request_SetOption:
|
||||
res := s.app.SetOption(*r.SetOption)
|
||||
responses <- types.ToResponseSetOption(res)
|
||||
case *types.Request_DeliverTx:
|
||||
res := s.app.DeliverTx(*r.DeliverTx)
|
||||
responses <- types.ToResponseDeliverTx(res)
|
||||
|
||||
@@ -2,6 +2,7 @@ package testsuite
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
@@ -10,15 +11,17 @@ import (
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func InitChain(client abcicli.Client) error {
|
||||
total := 10
|
||||
vals := make([]types.ValidatorUpdate, total)
|
||||
for i := 0; i < total; i++ {
|
||||
pubkey := tmrand.Bytes(33)
|
||||
power := tmrand.Int()
|
||||
vals[i] = types.Ed25519ValidatorUpdate(pubkey, int64(power))
|
||||
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
|
||||
}
|
||||
_, err := client.InitChainSync(types.RequestInitChain{
|
||||
_, err := client.InitChainSync(ctx, types.RequestInitChain{
|
||||
Validators: vals,
|
||||
})
|
||||
if err != nil {
|
||||
@@ -29,19 +32,8 @@ func InitChain(client abcicli.Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func SetOption(client abcicli.Client, key, value string) error {
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
|
||||
if err != nil {
|
||||
fmt.Println("Failed test: SetOption")
|
||||
fmt.Printf("error while setting %v=%v: \nerror: %v\n", key, value, err)
|
||||
return err
|
||||
}
|
||||
fmt.Println("Passed test: SetOption")
|
||||
return nil
|
||||
}
|
||||
|
||||
func Commit(client abcicli.Client, hashExp []byte) error {
|
||||
res, err := client.CommitSync()
|
||||
res, err := client.CommitSync(ctx)
|
||||
data := res.Data
|
||||
if err != nil {
|
||||
fmt.Println("Failed test: Commit")
|
||||
@@ -58,7 +50,7 @@ func Commit(client abcicli.Client, hashExp []byte) error {
|
||||
}
|
||||
|
||||
func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
|
||||
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: DeliverTx")
|
||||
@@ -77,7 +69,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
|
||||
}
|
||||
|
||||
func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
|
||||
res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
|
||||
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
|
||||
code, data, log := res.Code, res.Data, res.Log
|
||||
if code != codeExp {
|
||||
fmt.Println("Failed test: CheckTx")
|
||||
|
||||
@@ -2,6 +2,7 @@ package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
@@ -10,6 +11,8 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
var ctx = context.Background()
|
||||
|
||||
func startClient(abciType string) abcicli.Client {
|
||||
// Start client
|
||||
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
|
||||
@@ -25,15 +28,8 @@ func startClient(abciType string) abcicli.Client {
|
||||
return client
|
||||
}
|
||||
|
||||
func setOption(client abcicli.Client, key, value string) {
|
||||
_, err := client.SetOptionSync(types.RequestSetOption{Key: key, Value: value})
|
||||
if err != nil {
|
||||
panicf("setting %v=%v: \nerr: %v", key, value, err)
|
||||
}
|
||||
}
|
||||
|
||||
func commit(client abcicli.Client, hashExp []byte) {
|
||||
res, err := client.CommitSync()
|
||||
res, err := client.CommitSync(ctx)
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
@@ -43,7 +39,7 @@ func commit(client abcicli.Client, hashExp []byte) {
|
||||
}
|
||||
|
||||
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
|
||||
res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
|
||||
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
@@ -55,24 +51,6 @@ func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
|
||||
}
|
||||
}
|
||||
|
||||
/*func checkTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
|
||||
res, err := client.CheckTxSync(txBytes)
|
||||
if err != nil {
|
||||
panicf("client error: %v", err)
|
||||
}
|
||||
if res.IsErr() {
|
||||
panicf("checking tx %X: %v\nlog: %v", txBytes, res.Log)
|
||||
}
|
||||
if res.Code != codeExp {
|
||||
panicf("CheckTx response code was unexpected. Got %v expected %v. Log: %v",
|
||||
res.Code, codeExp, res.Log)
|
||||
}
|
||||
if !bytes.Equal(res.Data, dataExp) {
|
||||
panicf("CheckTx response data was unexpected. Got %X expected %X",
|
||||
res.Data, dataExp)
|
||||
}
|
||||
}*/
|
||||
|
||||
func panicf(format string, a ...interface{}) {
|
||||
panic(fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"os/exec"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/abci/example/code"
|
||||
"github.com/tendermint/tendermint/abci/types"
|
||||
)
|
||||
|
||||
@@ -41,7 +40,7 @@ func ensureABCIIsUp(typ string, n int) error {
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
<-time.After(500 * time.Millisecond)
|
||||
time.Sleep(500 * time.Millisecond)
|
||||
}
|
||||
return err
|
||||
}
|
||||
@@ -69,7 +68,7 @@ func testCounter() {
|
||||
}()
|
||||
|
||||
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
|
||||
log.Fatalf("echo failed: %v", err)
|
||||
log.Fatalf("echo failed: %v", err) //nolint:gocritic
|
||||
}
|
||||
|
||||
client := startClient(abciType)
|
||||
@@ -79,17 +78,16 @@ func testCounter() {
|
||||
}
|
||||
}()
|
||||
|
||||
setOption(client, "serial", "on")
|
||||
commit(client, nil)
|
||||
deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
|
||||
// commit(client, nil)
|
||||
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
|
||||
commit(client, nil)
|
||||
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
|
||||
deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
|
||||
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
|
||||
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
|
||||
deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
|
||||
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
|
||||
}
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
set_option serial on
|
||||
check_tx 0x00
|
||||
check_tx 0xff
|
||||
deliver_tx 0x00
|
||||
|
||||
@@ -1,7 +1,3 @@
|
||||
> set_option serial on
|
||||
-> code: OK
|
||||
-> log: OK (SetOption doesn't return anything.)
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: OK
|
||||
|
||||
@@ -12,18 +8,16 @@
|
||||
-> code: OK
|
||||
|
||||
> check_tx 0x00
|
||||
-> code: 2
|
||||
-> log: Invalid nonce. Expected >= 1, got 0
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x01
|
||||
-> code: OK
|
||||
|
||||
> deliver_tx 0x04
|
||||
-> code: 2
|
||||
-> log: Invalid nonce. Expected 2, got 4
|
||||
-> code: OK
|
||||
|
||||
> info
|
||||
-> code: OK
|
||||
-> data: {"hashes":0,"txs":2}
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A327D
|
||||
-> data: {"hashes":0,"txs":3}
|
||||
-> data.hex: 0x7B22686173686573223A302C22747873223A337D
|
||||
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
context "golang.org/x/net/context"
|
||||
"context"
|
||||
)
|
||||
|
||||
// Application is an interface that enables any finite, deterministic state machine
|
||||
@@ -10,9 +10,8 @@ import (
|
||||
// except CheckTx/DeliverTx, which take `tx []byte`, and `Commit`, which takes nothing.
|
||||
type Application interface {
|
||||
// Info/Query Connection
|
||||
Info(RequestInfo) ResponseInfo // Return application info
|
||||
SetOption(RequestSetOption) ResponseSetOption // Set application option
|
||||
Query(RequestQuery) ResponseQuery // Query for state
|
||||
Info(RequestInfo) ResponseInfo // Return application info
|
||||
Query(RequestQuery) ResponseQuery // Query for state
|
||||
|
||||
// Mempool Connection
|
||||
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
|
||||
@@ -47,10 +46,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
|
||||
return ResponseInfo{}
|
||||
}
|
||||
|
||||
func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
|
||||
return ResponseSetOption{}
|
||||
}
|
||||
|
||||
func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
|
||||
return ResponseDeliverTx{Code: CodeTypeOK}
|
||||
}
|
||||
@@ -119,11 +114,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption) (*ResponseSetOption, error) {
|
||||
res := app.app.SetOption(*req)
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
|
||||
res := app.app.DeliverTx(*req)
|
||||
return &res, nil
|
||||
|
||||
@@ -1,11 +1,10 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/tendermint/tendermint/libs/protoio"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -14,57 +13,19 @@ const (
|
||||
|
||||
// WriteMessage writes a varint length-delimited protobuf message.
|
||||
func WriteMessage(msg proto.Message, w io.Writer) error {
|
||||
bz, err := proto.Marshal(msg)
|
||||
protoWriter := protoio.NewDelimitedWriter(w)
|
||||
_, err := protoWriter.WriteMsg(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return encodeByteSlice(w, bz)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReadMessage reads a varint length-delimited protobuf message.
|
||||
func ReadMessage(r io.Reader, msg proto.Message) error {
|
||||
return readProtoMsg(r, msg, maxMsgSize)
|
||||
}
|
||||
|
||||
func readProtoMsg(r io.Reader, msg proto.Message, maxSize int) error {
|
||||
// binary.ReadVarint takes an io.ByteReader, eg. a bufio.Reader
|
||||
reader, ok := r.(*bufio.Reader)
|
||||
if !ok {
|
||||
reader = bufio.NewReader(r)
|
||||
}
|
||||
length64, err := binary.ReadVarint(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
length := int(length64)
|
||||
if length < 0 || length > maxSize {
|
||||
return io.ErrShortBuffer
|
||||
}
|
||||
buf := make([]byte, length)
|
||||
if _, err := io.ReadFull(reader, buf); err != nil {
|
||||
return err
|
||||
}
|
||||
return proto.Unmarshal(buf, msg)
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------
|
||||
// NOTE: we copied wire.EncodeByteSlice from go-wire rather than keep
|
||||
// go-wire as a dep
|
||||
|
||||
func encodeByteSlice(w io.Writer, bz []byte) (err error) {
|
||||
err = encodeVarint(w, int64(len(bz)))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
_, err = w.Write(bz)
|
||||
return
|
||||
}
|
||||
|
||||
func encodeVarint(w io.Writer, i int64) (err error) {
|
||||
var buf [10]byte
|
||||
n := binary.PutVarint(buf[:], i)
|
||||
_, err = w.Write(buf[0:n])
|
||||
return
|
||||
_, err := protoio.NewDelimitedReader(r, maxMsgSize).ReadMsg(msg)
|
||||
return err
|
||||
}
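WriteMessage and ReadMessage are symmetric, so a message round-trips through any io.Writer/io.Reader pair. A small sketch using the Echo request helper from this package:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	var buf bytes.Buffer

	// Write a varint length-delimited Request, then read it back.
	req := types.ToRequestEcho("hello")
	if err := types.WriteMessage(req, &buf); err != nil {
		panic(err)
	}

	var got types.Request
	if err := types.ReadMessage(&buf, &got); err != nil {
		panic(err)
	}
	fmt.Println(got.GetEcho().Message) // hello
}
```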
//----------------------------------------
|
||||
@@ -87,12 +48,6 @@ func ToRequestInfo(req RequestInfo) *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestSetOption(req RequestSetOption) *Request {
|
||||
return &Request{
|
||||
Value: &Request_SetOption{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
|
||||
return &Request{
|
||||
Value: &Request_DeliverTx{&req},
|
||||
@@ -184,13 +139,6 @@ func ToResponseInfo(res ResponseInfo) *Response {
|
||||
Value: &Response_Info{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseSetOption(res ResponseSetOption) *Response {
|
||||
return &Response{
|
||||
Value: &Response_SetOption{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
|
||||
return &Response{
|
||||
Value: &Response_DeliverTx{&res},
|
||||
|
||||
@@ -1,16 +1,16 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
fmt "fmt"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto/ed25519"
|
||||
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
|
||||
)
|
||||
|
||||
const (
|
||||
PubKeyEd25519 = "ed25519"
|
||||
"github.com/tendermint/tendermint/crypto/secp256k1"
|
||||
)
|
||||
|
||||
func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
|
||||
pke := ed25519.PubKey(pk)
|
||||
|
||||
pkp, err := cryptoenc.PubKeyToProto(pke)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
@@ -22,3 +22,23 @@ func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
|
||||
Power: power,
|
||||
}
|
||||
}
|
||||
|
||||
func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
|
||||
switch keyType {
|
||||
case "", ed25519.KeyType:
|
||||
return Ed25519ValidatorUpdate(pk, power)
|
||||
case secp256k1.KeyType:
|
||||
pke := secp256k1.PubKey(pk)
|
||||
pkp, err := cryptoenc.PubKeyToProto(pke)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return ValidatorUpdate{
|
||||
// Address:
|
||||
PubKey: pkp,
|
||||
Power: power,
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("key type %s not supported", keyType))
|
||||
}
|
||||
}
|
||||
|
||||
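A brief usage sketch of UpdateValidator as defined above. It is illustrative only and assumes the tendermint crypto/ed25519 package provides GenPrivKey and that PubKey().Bytes() yields the raw public key bytes; an empty keyType defaults to ed25519, per the switch above.

// Illustrative fragment; error handling and realistic voting powers omitted.
priv := ed25519.GenPrivKey()      // assumed test-key helper
pubBytes := priv.PubKey().Bytes() // raw ed25519 public key bytes

// Empty keyType falls through to the ed25519 case.
update := UpdateValidator(pubBytes, 10, "")

// Equivalent explicit form:
update = UpdateValidator(pubBytes, 10, ed25519.KeyType)
_ = update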
@@ -52,16 +52,6 @@ var (
|
||||
jsonpbUnmarshaller = jsonpb.Unmarshaler{}
|
||||
)
|
||||
|
||||
func (r *ResponseSetOption) MarshalJSON() ([]byte, error) {
|
||||
s, err := jsonpbMarshaller.MarshalToString(r)
|
||||
return []byte(s), err
|
||||
}
|
||||
|
||||
func (r *ResponseSetOption) UnmarshalJSON(b []byte) error {
|
||||
reader := bytes.NewBuffer(b)
|
||||
return jsonpbUnmarshaller.Unmarshal(reader, r)
|
||||
}
|
||||
|
||||
func (r *ResponseCheckTx) MarshalJSON() ([]byte, error) {
|
||||
s, err := jsonpbMarshaller.MarshalToString(r)
|
||||
return []byte(s), err
|
||||
@@ -126,6 +116,5 @@ var _ jsonRoundTripper = (*ResponseCommit)(nil)
|
||||
var _ jsonRoundTripper = (*ResponseQuery)(nil)
|
||||
var _ jsonRoundTripper = (*ResponseDeliverTx)(nil)
|
||||
var _ jsonRoundTripper = (*ResponseCheckTx)(nil)
|
||||
var _ jsonRoundTripper = (*ResponseSetOption)(nil)
|
||||
|
||||
var _ jsonRoundTripper = (*EventAttribute)(nil)
|
||||
|
||||
File diff suppressed because it is too large
@@ -27,7 +27,5 @@ func (v ValidatorUpdates) Less(i, j int) bool {
}

func (v ValidatorUpdates) Swap(i, j int) {
v1 := v[i]
v[i] = v[j]
v[j] = v1
v[i], v[j] = v[j], v[i]
}
|
||||
12
appveyor.yml
@@ -1,12 +0,0 @@
version: 1.0.{build}
configuration: Release
platform:
- x64
- x86
clone_folder: c:\go\path\src\github.com\tendermint\tendermint
before_build:
- cmd: set GOPATH=%GOROOT%\path
- cmd: set PATH=%GOPATH%\bin;%PATH%
build_script:
- cmd: make test
test: off
@@ -8,7 +8,7 @@ import (
|
||||
// `peerID` identifies the peer and reason characterizes the specific
|
||||
// behaviour performed by the peer.
|
||||
type PeerBehaviour struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
reason interface{}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ type badMessage struct {
|
||||
}
|
||||
|
||||
// BadMessage returns a badMessage PeerBehaviour.
|
||||
func BadMessage(peerID p2p.ID, explanation string) PeerBehaviour {
|
||||
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
|
||||
}
|
||||
|
||||
@@ -26,7 +26,7 @@ type messageOutOfOrder struct {
|
||||
}
|
||||
|
||||
// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
|
||||
func MessageOutOfOrder(peerID p2p.ID, explanation string) PeerBehaviour {
|
||||
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
|
||||
}
|
||||
|
||||
@@ -35,7 +35,7 @@ type consensusVote struct {
|
||||
}
|
||||
|
||||
// ConsensusVote returns a consensusVote PeerBehaviour.
|
||||
func ConsensusVote(peerID p2p.ID, explanation string) PeerBehaviour {
|
||||
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
|
||||
}
|
||||
|
||||
@@ -44,6 +44,6 @@ type blockPart struct {
|
||||
}
|
||||
|
||||
// BlockPart returns blockPart PeerBehaviour.
|
||||
func BlockPart(peerID p2p.ID, explanation string) PeerBehaviour {
|
||||
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehaviour {
|
||||
return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
|
||||
}
|
||||
|
||||
@@ -51,14 +51,14 @@ func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
|
||||
// behaviour in manufactured scenarios.
|
||||
type MockReporter struct {
|
||||
mtx tmsync.RWMutex
|
||||
pb map[p2p.ID][]PeerBehaviour
|
||||
pb map[p2p.NodeID][]PeerBehaviour
|
||||
}
|
||||
|
||||
// NewMockReporter returns a Reporter which records all reported
|
||||
// behaviours in memory.
|
||||
func NewMockReporter() *MockReporter {
|
||||
return &MockReporter{
|
||||
pb: map[p2p.ID][]PeerBehaviour{},
|
||||
pb: map[p2p.NodeID][]PeerBehaviour{},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -72,7 +72,7 @@ func (mpbr *MockReporter) Report(behaviour PeerBehaviour) error {
|
||||
}
|
||||
|
||||
// GetBehaviours returns all behaviours reported on the peer identified by peerID.
|
||||
func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour {
|
||||
func (mpbr *MockReporter) GetBehaviours(peerID p2p.NodeID) []PeerBehaviour {
|
||||
mpbr.mtx.RLock()
|
||||
defer mpbr.mtx.RUnlock()
|
||||
if items, ok := mpbr.pb[peerID]; ok {
|
||||
|
||||
@@ -11,7 +11,7 @@ import (
|
||||
// TestMockReporter tests the MockReporter's ability to store reported
|
||||
// peer behaviour in memory indexed by the peerID.
|
||||
func TestMockReporter(t *testing.T) {
|
||||
var peerID p2p.ID = "MockPeer"
|
||||
var peerID p2p.NodeID = "MockPeer"
|
||||
pr := bh.NewMockReporter()
|
||||
|
||||
behaviours := pr.GetBehaviours(peerID)
|
||||
@@ -34,7 +34,7 @@ func TestMockReporter(t *testing.T) {
|
||||
}
|
||||
|
||||
type scriptItem struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
behaviour bh.PeerBehaviour
|
||||
}
|
||||
|
||||
@@ -76,10 +76,10 @@ func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
|
||||
// frequencies with which those behaviours occur.
|
||||
func TestEqualPeerBehaviours(t *testing.T) {
|
||||
var (
|
||||
peerID p2p.ID = "MockPeer"
|
||||
consensusVote = bh.ConsensusVote(peerID, "voted")
|
||||
blockPart = bh.BlockPart(peerID, "blocked")
|
||||
equals = []struct {
|
||||
peerID p2p.NodeID = "MockPeer"
|
||||
consensusVote = bh.ConsensusVote(peerID, "voted")
|
||||
blockPart = bh.BlockPart(peerID, "blocked")
|
||||
equals = []struct {
|
||||
left []bh.PeerBehaviour
|
||||
right []bh.PeerBehaviour
|
||||
}{
|
||||
@@ -128,7 +128,7 @@ func TestEqualPeerBehaviours(t *testing.T) {
|
||||
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
|
||||
var (
|
||||
behaviourScript = []struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
behaviours []bh.PeerBehaviour
|
||||
}{
|
||||
{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
|
||||
|
||||
17
blockchain/doc.go
Normal file
@@ -0,0 +1,17 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.

- v0 was the very first implementation. It's battle-tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.

Check out ADR-40 for the formal model and requirements.

# Termination criteria

1. the maximum peer height is reached
2. termination timeout is triggered, which is set if the peer set is empty or
there are no pending requests.

*/
package blockchain
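A minimal sketch of the two termination criteria listed above. The names and signature below are illustrative, not the actual v0 or v2 API, and the snippet assumes a standard `import "time"`.

// shouldTerminate restates the two criteria from the package doc.
func shouldTerminate(
	height, maxPeerHeight int64,
	numPeers, pendingRequests int,
	idleSince time.Time,
	timeout time.Duration,
) bool {
	// 1. the maximum peer height is reached
	if maxPeerHeight > 0 && height >= maxPeerHeight {
		return true
	}
	// 2. the termination timeout fired while the peer set was empty or there
	//    were no pending requests
	if (numPeers == 0 || pendingRequests == 0) && time.Since(idleSince) > timeout {
		return true
	}
	return false
}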
@@ -1,110 +1,12 @@
|
||||
package blockchain
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// NOTE: keep up to date with bcproto.BlockResponse
|
||||
BlockResponseMessagePrefixSize = 4
|
||||
BlockResponseMessageFieldKeySize = 1
|
||||
MaxMsgSize = types.MaxBlockSizeBytes +
|
||||
BlockResponseMessagePrefixSize +
|
||||
BlockResponseMessageFieldKeySize
|
||||
MaxMsgSize = types.MaxBlockSizeBytes +
|
||||
bcproto.BlockResponseMessagePrefixSize +
|
||||
bcproto.BlockResponseMessageFieldKeySize
|
||||
)
|
||||
|
||||
// EncodeMsg encodes a Protobuf message
|
||||
func EncodeMsg(pb proto.Message) ([]byte, error) {
|
||||
msg := bcproto.Message{}
|
||||
|
||||
switch pb := pb.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
msg.Sum = &bcproto.Message_BlockRequest{BlockRequest: pb}
|
||||
case *bcproto.BlockResponse:
|
||||
msg.Sum = &bcproto.Message_BlockResponse{BlockResponse: pb}
|
||||
case *bcproto.NoBlockResponse:
|
||||
msg.Sum = &bcproto.Message_NoBlockResponse{NoBlockResponse: pb}
|
||||
case *bcproto.StatusRequest:
|
||||
msg.Sum = &bcproto.Message_StatusRequest{StatusRequest: pb}
|
||||
case *bcproto.StatusResponse:
|
||||
msg.Sum = &bcproto.Message_StatusResponse{StatusResponse: pb}
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown message type %T", pb)
|
||||
}
|
||||
|
||||
bz, err := proto.Marshal(&msg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to marshal %T: %w", pb, err)
|
||||
}
|
||||
|
||||
return bz, nil
|
||||
}
|
||||
|
||||
// DecodeMsg decodes a Protobuf message.
|
||||
func DecodeMsg(bz []byte) (proto.Message, error) {
|
||||
pb := &bcproto.Message{}
|
||||
|
||||
err := proto.Unmarshal(bz, pb)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
switch msg := pb.Sum.(type) {
|
||||
case *bcproto.Message_BlockRequest:
|
||||
return msg.BlockRequest, nil
|
||||
case *bcproto.Message_BlockResponse:
|
||||
return msg.BlockResponse, nil
|
||||
case *bcproto.Message_NoBlockResponse:
|
||||
return msg.NoBlockResponse, nil
|
||||
case *bcproto.Message_StatusRequest:
|
||||
return msg.StatusRequest, nil
|
||||
case *bcproto.Message_StatusResponse:
|
||||
return msg.StatusResponse, nil
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown message type %T", msg)
|
||||
}
|
||||
}
|
||||
|
||||
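A short usage sketch of the EncodeMsg/DecodeMsg pair above, as it might appear in a test inside this package: a StatusRequest is wrapped in bcproto.Message, marshaled, and unwrapped again.

// Round-trip a StatusRequest through the wrapping bcproto.Message.
bz, err := EncodeMsg(&bcproto.StatusRequest{})
if err != nil {
	// handle the error
}

msg, err := DecodeMsg(bz)
if err != nil {
	// handle the error
}

if _, ok := msg.(*bcproto.StatusRequest); !ok {
	// unexpected message type
}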
// ValidateMsg validates a message.
|
||||
func ValidateMsg(pb proto.Message) error {
|
||||
if pb == nil {
|
||||
return errors.New("message cannot be nil")
|
||||
}
|
||||
|
||||
switch msg := pb.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
if msg.Height < 0 {
|
||||
return errors.New("negative Height")
|
||||
}
|
||||
case *bcproto.BlockResponse:
|
||||
_, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
case *bcproto.NoBlockResponse:
|
||||
if msg.Height < 0 {
|
||||
return errors.New("negative Height")
|
||||
}
|
||||
case *bcproto.StatusResponse:
|
||||
if msg.Base < 0 {
|
||||
return errors.New("negative Base")
|
||||
}
|
||||
if msg.Height < 0 {
|
||||
return errors.New("negative Height")
|
||||
}
|
||||
if msg.Base > msg.Height {
|
||||
return fmt.Errorf("base %v cannot be greater than height %v", msg.Base, msg.Height)
|
||||
}
|
||||
case *bcproto.StatusRequest:
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf("unknown message type %T", msg)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -30,6 +30,7 @@ eg, L = latency = 0.1s
|
||||
const (
|
||||
requestIntervalMS = 2
|
||||
maxTotalRequesters = 600
|
||||
maxPeerErrBuffer = 1000
|
||||
maxPendingRequests = maxTotalRequesters
|
||||
maxPendingRequestsPerPeer = 20
|
||||
|
||||
@@ -58,17 +59,24 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
|
||||
are not at peer limits, we can probably switch to consensus reactor
|
||||
*/
|
||||
|
||||
// BlockRequest stores a block request identified by the block Height and the
|
||||
// PeerID responsible for delivering the block.
|
||||
type BlockRequest struct {
|
||||
Height int64
|
||||
PeerID p2p.NodeID
|
||||
}
|
||||
|
||||
// BlockPool keeps track of the fast sync peers, block requests and block responses.
|
||||
type BlockPool struct {
|
||||
service.BaseService
|
||||
startTime time.Time
|
||||
lastAdvance time.Time
|
||||
|
||||
mtx tmsync.Mutex
|
||||
mtx tmsync.RWMutex
|
||||
// block requests
|
||||
requesters map[int64]*bpRequester
|
||||
height int64 // the lowest key in requesters.
|
||||
// peers
|
||||
peers map[p2p.ID]*bpPeer
|
||||
peers map[p2p.NodeID]*bpPeer
|
||||
maxPeerHeight int64 // the biggest reported height
|
||||
|
||||
// atomic
|
||||
@@ -82,7 +90,7 @@ type BlockPool struct {
|
||||
// requests and errors will be sent to requestsCh and errorsCh accordingly.
|
||||
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
|
||||
bp := &BlockPool{
|
||||
peers: make(map[p2p.ID]*bpPeer),
|
||||
peers: make(map[p2p.NodeID]*bpPeer),
|
||||
|
||||
requesters: make(map[int64]*bpRequester),
|
||||
height: start,
|
||||
@@ -98,8 +106,8 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
|
||||
// OnStart implements service.Service by spawning requesters routine and recording
|
||||
// pool's start time.
|
||||
func (pool *BlockPool) OnStart() error {
|
||||
pool.lastAdvance = time.Now()
|
||||
go pool.makeRequestersRoutine()
|
||||
pool.startTime = time.Now()
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -134,6 +142,7 @@ func (pool *BlockPool) removeTimedoutPeers() {
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
for _, peer := range pool.peers {
|
||||
// check if peer timed out
|
||||
if !peer.didTimeout && peer.numPending > 0 {
|
||||
curRate := peer.recvMonitor.Status().CurRate
|
||||
// curRate can be 0 on start
|
||||
@@ -147,6 +156,7 @@ func (pool *BlockPool) removeTimedoutPeers() {
|
||||
peer.didTimeout = true
|
||||
}
|
||||
}
|
||||
|
||||
if peer.didTimeout {
|
||||
pool.removePeer(peer.id)
|
||||
}
|
||||
@@ -156,33 +166,25 @@ func (pool *BlockPool) removeTimedoutPeers() {
|
||||
// GetStatus returns pool's height, numPending requests and the number of
|
||||
// requesters.
|
||||
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
pool.mtx.RLock()
|
||||
defer pool.mtx.RUnlock()
|
||||
|
||||
return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
|
||||
}
|
||||
|
||||
// IsCaughtUp returns true if this node is caught up, false - otherwise.
|
||||
// TODO: relax conditions, prevent abuse.
|
||||
func (pool *BlockPool) IsCaughtUp() bool {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
pool.mtx.RLock()
|
||||
defer pool.mtx.RUnlock()
|
||||
|
||||
// Need at least 1 peer to be considered caught up.
|
||||
if len(pool.peers) == 0 {
|
||||
pool.Logger.Debug("Blockpool has no peers")
|
||||
return false
|
||||
}
|
||||
|
||||
// Some conditions to determine if we're caught up.
|
||||
// Ensures we've either received a block or waited some amount of time,
|
||||
// and that we're synced to the highest known height.
|
||||
// Note we use maxPeerHeight - 1 because to sync block H requires block H+1
|
||||
// NOTE: we use maxPeerHeight - 1 because to sync block H requires block H+1
|
||||
// to verify the LastCommit.
|
||||
receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second
|
||||
ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1)
|
||||
isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
|
||||
return isCaughtUp
|
||||
return pool.height >= (pool.maxPeerHeight - 1)
|
||||
}
|
||||
|
||||
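A worked example of the caught-up condition above, with illustrative numbers; the helper simply restates the returned expression (the peer-count precondition is handled separately above).

// caughtUp restates the check: syncing block H requires block H+1 to verify
// H's LastCommit, so a node is considered caught up at maxPeerHeight-1.
func caughtUp(height, maxPeerHeight int64) bool {
	return height >= maxPeerHeight-1
}

// caughtUp(98, 100) == false // block 99 still has to be applied
// caughtUp(99, 100) == true  // block 100 is only needed to verify block 99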
// PeekTwoBlocks returns blocks at pool.height and pool.height+1.
|
||||
@@ -190,8 +192,8 @@ func (pool *BlockPool) IsCaughtUp() bool {
|
||||
// So we peek two blocks at a time.
|
||||
// The caller will verify the commit.
|
||||
func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
pool.mtx.RLock()
|
||||
defer pool.mtx.RUnlock()
|
||||
|
||||
if r := pool.requesters[pool.height]; r != nil {
|
||||
first = r.getBlock()
|
||||
@@ -209,16 +211,12 @@ func (pool *BlockPool) PopRequest() {
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
if r := pool.requesters[pool.height]; r != nil {
|
||||
/* The block can disappear at any time, due to removePeer().
|
||||
if r := pool.requesters[pool.height]; r == nil || r.block == nil {
|
||||
PanicSanity("PopRequest() requires a valid block")
|
||||
}
|
||||
*/
|
||||
if err := r.Stop(); err != nil {
|
||||
pool.Logger.Error("Error stopping requester", "err", err)
|
||||
}
|
||||
delete(pool.requesters, pool.height)
|
||||
pool.height++
|
||||
pool.lastAdvance = time.Now()
|
||||
} else {
|
||||
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
|
||||
}
|
||||
@@ -227,13 +225,13 @@ func (pool *BlockPool) PopRequest() {
|
||||
// RedoRequest invalidates the block at pool.height,
|
||||
// Remove the peer and redo request from others.
|
||||
// Returns the ID of the removed peer.
|
||||
func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
|
||||
func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
request := pool.requesters[height]
|
||||
peerID := request.getPeerID()
|
||||
if peerID != p2p.ID("") {
|
||||
if peerID != p2p.NodeID("") {
|
||||
// RemovePeer will redo all requesters associated with this peer.
|
||||
pool.removePeer(peerID)
|
||||
}
|
||||
@@ -242,20 +240,14 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
|
||||
|
||||
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
|
||||
// TODO: ensure that blocks come in order for each peer.
|
||||
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
|
||||
func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize int) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
requester := pool.requesters[block.Height]
|
||||
if requester == nil {
|
||||
pool.Logger.Info(
|
||||
"peer sent us a block we didn't expect",
|
||||
"peer",
|
||||
peerID,
|
||||
"curHeight",
|
||||
pool.height,
|
||||
"blockHeight",
|
||||
block.Height)
|
||||
pool.Logger.Error("peer sent us a block we didn't expect",
|
||||
"peer", peerID, "curHeight", pool.height, "blockHeight", block.Height)
|
||||
diff := pool.height - block.Height
|
||||
if diff < 0 {
|
||||
diff *= -1
|
||||
@@ -273,20 +265,29 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
|
||||
peer.decrPending(blockSize)
|
||||
}
|
||||
} else {
|
||||
pool.Logger.Info("invalid peer", "peer", peerID, "blockHeight", block.Height)
|
||||
pool.sendError(errors.New("invalid peer"), peerID)
|
||||
err := errors.New("requester is different or block already exists")
|
||||
pool.Logger.Error(err.Error(), "peer", peerID, "requester", requester.getPeerID(), "blockHeight", block.Height)
|
||||
pool.sendError(err, peerID)
|
||||
}
|
||||
}
|
||||
|
||||
// MaxPeerHeight returns the highest reported height.
|
||||
func (pool *BlockPool) MaxPeerHeight() int64 {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
pool.mtx.RLock()
|
||||
defer pool.mtx.RUnlock()
|
||||
return pool.maxPeerHeight
|
||||
}
|
||||
|
||||
// LastAdvance returns the time when the last block was processed (or start
|
||||
// time if no blocks were processed).
|
||||
func (pool *BlockPool) LastAdvance() time.Time {
|
||||
pool.mtx.RLock()
|
||||
defer pool.mtx.RUnlock()
|
||||
return pool.lastAdvance
|
||||
}
|
||||
|
||||
// SetPeerRange sets the peer's alleged blockchain base and height.
|
||||
func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
|
||||
func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
@@ -307,14 +308,14 @@ func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) {
|
||||
|
||||
// RemovePeer removes the peer with peerID from the pool. If there's no peer
|
||||
// with peerID, function is a no-op.
|
||||
func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
|
||||
func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
|
||||
pool.mtx.Lock()
|
||||
defer pool.mtx.Unlock()
|
||||
|
||||
pool.removePeer(peerID)
|
||||
}
|
||||
|
||||
func (pool *BlockPool) removePeer(peerID p2p.ID) {
|
||||
func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
|
||||
for _, requester := range pool.requesters {
|
||||
if requester.getPeerID() == peerID {
|
||||
requester.redo(peerID)
|
||||
@@ -395,14 +396,14 @@ func (pool *BlockPool) requestersLen() int64 {
|
||||
return int64(len(pool.requesters))
|
||||
}
|
||||
|
||||
func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) {
|
||||
func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
pool.requestsCh <- BlockRequest{height, peerID}
|
||||
}
|
||||
|
||||
func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
|
||||
func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
|
||||
if !pool.IsRunning() {
|
||||
return
|
||||
}
|
||||
@@ -436,7 +437,7 @@ type bpPeer struct {
|
||||
height int64
|
||||
base int64
|
||||
pool *BlockPool
|
||||
id p2p.ID
|
||||
id p2p.NodeID
|
||||
recvMonitor *flow.Monitor
|
||||
|
||||
timeout *time.Timer
|
||||
@@ -444,7 +445,7 @@ type bpPeer struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
func newBPPeer(pool *BlockPool, peerID p2p.ID, base int64, height int64) *bpPeer {
|
||||
func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
|
||||
peer := &bpPeer{
|
||||
pool: pool,
|
||||
id: peerID,
|
||||
@@ -509,10 +510,10 @@ type bpRequester struct {
|
||||
pool *BlockPool
|
||||
height int64
|
||||
gotBlockCh chan struct{}
|
||||
redoCh chan p2p.ID //redo may send multitime, add peerId to identify repeat
|
||||
redoCh chan p2p.NodeID // redo may be sent multiple times; the peerID identifies repeats
|
||||
|
||||
mtx tmsync.Mutex
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
block *types.Block
|
||||
}
|
||||
|
||||
@@ -521,7 +522,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
|
||||
pool: pool,
|
||||
height: height,
|
||||
gotBlockCh: make(chan struct{}, 1),
|
||||
redoCh: make(chan p2p.ID, 1),
|
||||
redoCh: make(chan p2p.NodeID, 1),
|
||||
|
||||
peerID: "",
|
||||
block: nil,
|
||||
@@ -536,7 +537,7 @@ func (bpr *bpRequester) OnStart() error {
|
||||
}
|
||||
|
||||
// Returns true if the peer matches and block doesn't already exist.
|
||||
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool {
|
||||
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.NodeID) bool {
|
||||
bpr.mtx.Lock()
|
||||
if bpr.block != nil || bpr.peerID != peerID {
|
||||
bpr.mtx.Unlock()
|
||||
@@ -558,7 +559,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
|
||||
return bpr.block
|
||||
}
|
||||
|
||||
func (bpr *bpRequester) getPeerID() p2p.ID {
|
||||
func (bpr *bpRequester) getPeerID() p2p.NodeID {
|
||||
bpr.mtx.Lock()
|
||||
defer bpr.mtx.Unlock()
|
||||
return bpr.peerID
|
||||
@@ -580,7 +581,7 @@ func (bpr *bpRequester) reset() {
|
||||
// Tells bpRequester to pick another peer and try again.
|
||||
// NOTE: Nonblocking, and does nothing if another redo
|
||||
// was already requested.
|
||||
func (bpr *bpRequester) redo(peerID p2p.ID) {
|
||||
func (bpr *bpRequester) redo(peerID p2p.NodeID) {
|
||||
select {
|
||||
case bpr.redoCh <- peerID:
|
||||
default:
|
||||
@@ -601,7 +602,6 @@ OUTER_LOOP:
|
||||
}
|
||||
peer = bpr.pool.pickIncrAvailablePeer(bpr.height)
|
||||
if peer == nil {
|
||||
//log.Info("No peers available", "height", height)
|
||||
time.Sleep(requestIntervalMS * time.Millisecond)
|
||||
continue PICK_PEER_LOOP
|
||||
}
|
||||
@@ -638,10 +638,3 @@ OUTER_LOOP:
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BlockRequest stores a block request identified by the block Height and the PeerID responsible for
|
||||
// delivering the block
|
||||
type BlockRequest struct {
|
||||
Height int64
|
||||
PeerID p2p.ID
|
||||
}
|
||||
|
||||
@@ -19,10 +19,10 @@ func init() {
|
||||
}
|
||||
|
||||
type testPeer struct {
|
||||
id p2p.ID
|
||||
id p2p.NodeID
|
||||
base int64
|
||||
height int64
|
||||
inputChan chan inputData //make sure each peer's data is sequential
|
||||
inputChan chan inputData // make sure each peer's data is sequential
|
||||
}
|
||||
|
||||
type inputData struct {
|
||||
@@ -49,7 +49,7 @@ func (p testPeer) simulateInput(input inputData) {
|
||||
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
|
||||
}
|
||||
|
||||
type testPeers map[p2p.ID]testPeer
|
||||
type testPeers map[p2p.NodeID]testPeer
|
||||
|
||||
func (ps testPeers) start() {
|
||||
for _, v := range ps {
|
||||
@@ -66,7 +66,7 @@ func (ps testPeers) stop() {
|
||||
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
|
||||
peers := make(testPeers, numPeers)
|
||||
for i := 0; i < numPeers; i++ {
|
||||
peerID := p2p.ID(tmrand.Str(12))
|
||||
peerID := p2p.NodeID(tmrand.Str(12))
|
||||
height := minHeight + tmrand.Int63n(maxHeight-minHeight)
|
||||
base := minHeight + int64(i)
|
||||
if base > height {
|
||||
@@ -182,7 +182,7 @@ func TestBlockPoolTimeout(t *testing.T) {
|
||||
|
||||
// Pull from channels
|
||||
counter := 0
|
||||
timedOut := map[p2p.ID]struct{}{}
|
||||
timedOut := map[p2p.NodeID]struct{}{}
|
||||
for {
|
||||
select {
|
||||
case err := <-errorsCh:
|
||||
@@ -203,7 +203,7 @@ func TestBlockPoolTimeout(t *testing.T) {
|
||||
func TestBlockPoolRemovePeer(t *testing.T) {
|
||||
peers := make(testPeers, 10)
|
||||
for i := 0; i < 10; i++ {
|
||||
peerID := p2p.ID(fmt.Sprintf("%d", i+1))
|
||||
peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
|
||||
height := int64(i + 1)
|
||||
peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
|
||||
}
|
||||
@@ -227,10 +227,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
|
||||
assert.EqualValues(t, 10, pool.MaxPeerHeight())
|
||||
|
||||
// remove not-existing peer
|
||||
assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) })
|
||||
assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })
|
||||
|
||||
// remove peer with biggest height
|
||||
pool.RemovePeer(p2p.ID("10"))
|
||||
pool.RemovePeer(p2p.NodeID("10"))
|
||||
assert.EqualValues(t, 9, pool.MaxPeerHeight())
|
||||
|
||||
// remove all peers
|
||||
|
||||
@@ -2,11 +2,12 @@ package v0
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
@@ -14,31 +15,55 @@ import (
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
var (
|
||||
_ service.Service = (*Reactor)(nil)
|
||||
|
||||
// ChannelShims contains a map of ChannelDescriptorShim objects, where each
|
||||
// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding
|
||||
// p2p proto.Message the new p2p Channel is responsible for handling.
|
||||
//
|
||||
//
|
||||
// TODO: Remove once p2p refactor is complete.
|
||||
// ref: https://github.com/tendermint/tendermint/issues/5670
|
||||
ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
|
||||
BlockchainChannel: {
|
||||
MsgType: new(bcproto.Message),
|
||||
Descriptor: &p2p.ChannelDescriptor{
|
||||
ID: byte(BlockchainChannel),
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 1000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
},
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
|
||||
BlockchainChannel = byte(0x40)
|
||||
// BlockchainChannel is a channel for blocks and status updates
|
||||
BlockchainChannel = p2p.ChannelID(0x40)
|
||||
|
||||
trySyncIntervalMS = 10
|
||||
|
||||
// stop syncing when last block's time is
|
||||
// within this much of the system time.
|
||||
// stopSyncingDurationMinutes = 10
|
||||
|
||||
// ask for best height every 10s
|
||||
statusUpdateIntervalSeconds = 10
|
||||
|
||||
// check if we should switch to consensus reactor
|
||||
switchToConsensusIntervalSeconds = 1
|
||||
|
||||
// switch to consensus after this duration of inactivity
|
||||
syncTimeout = 60 * time.Second
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
// for when we switch from blockchain reactor and fast sync to
|
||||
// the consensus machine
|
||||
// For when we switch from blockchain reactor and fast sync to the consensus
|
||||
// machine.
|
||||
SwitchToConsensus(state sm.State, skipWAL bool)
|
||||
}
|
||||
|
||||
type peerError struct {
|
||||
err error
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e peerError) Error() string {
|
||||
@@ -46,381 +71,510 @@ func (e peerError) Error() string {
|
||||
}
|
||||
|
||||
// BlockchainReactor handles long-term catchup syncing.
|
||||
type BlockchainReactor struct {
|
||||
p2p.BaseReactor
|
||||
type Reactor struct {
|
||||
service.BaseService
|
||||
|
||||
// immutable
|
||||
initialState sm.State
|
||||
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
pool *BlockPool
|
||||
fastSync bool
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
pool *BlockPool
|
||||
consReactor consensusReactor
|
||||
fastSync bool
|
||||
|
||||
blockchainCh *p2p.Channel
|
||||
peerUpdates *p2p.PeerUpdatesCh
|
||||
closeCh chan struct{}
|
||||
|
||||
requestsCh <-chan BlockRequest
|
||||
errorsCh <-chan peerError
|
||||
|
||||
// poolWG is used to synchronize the graceful shutdown of the poolRoutine and
|
||||
// requestRoutine spawned goroutines when stopping the reactor and before
|
||||
// stopping the p2p Channel(s).
|
||||
poolWG sync.WaitGroup
|
||||
}
|
||||
|
||||
// NewBlockchainReactor returns new reactor instance.
|
||||
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
|
||||
fastSync bool) *BlockchainReactor {
|
||||
|
||||
// NewReactor returns new reactor instance.
|
||||
func NewReactor(
|
||||
logger log.Logger,
|
||||
state sm.State,
|
||||
blockExec *sm.BlockExecutor,
|
||||
store *store.BlockStore,
|
||||
consReactor consensusReactor,
|
||||
blockchainCh *p2p.Channel,
|
||||
peerUpdates *p2p.PeerUpdatesCh,
|
||||
fastSync bool,
|
||||
) (*Reactor, error) {
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
store.Height()))
|
||||
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
|
||||
}
|
||||
|
||||
requestsCh := make(chan BlockRequest, maxTotalRequesters)
|
||||
|
||||
const capacity = 1000 // must be bigger than peers count
|
||||
errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock
|
||||
|
||||
startHeight := store.Height() + 1
|
||||
if startHeight == 1 {
|
||||
startHeight = state.InitialHeight
|
||||
}
|
||||
pool := NewBlockPool(startHeight, requestsCh, errorsCh)
|
||||
|
||||
bcR := &BlockchainReactor{
|
||||
requestsCh := make(chan BlockRequest, maxTotalRequesters)
|
||||
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
|
||||
|
||||
r := &Reactor{
|
||||
initialState: state,
|
||||
blockExec: blockExec,
|
||||
store: store,
|
||||
pool: pool,
|
||||
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
|
||||
consReactor: consReactor,
|
||||
fastSync: fastSync,
|
||||
requestsCh: requestsCh,
|
||||
errorsCh: errorsCh,
|
||||
blockchainCh: blockchainCh,
|
||||
peerUpdates: peerUpdates,
|
||||
closeCh: make(chan struct{}),
|
||||
}
|
||||
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
|
||||
return bcR
|
||||
|
||||
r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// SetLogger implements service.Service by setting the logger on reactor and pool.
|
||||
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
|
||||
bcR.BaseService.Logger = l
|
||||
bcR.pool.Logger = l
|
||||
}
|
||||
|
||||
// OnStart implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStart() error {
|
||||
if bcR.fastSync {
|
||||
err := bcR.pool.Start()
|
||||
if err != nil {
|
||||
// OnStart starts separate go routines for each p2p Channel and listens for
|
||||
// envelopes on each. In addition, it also listens for peer updates and handles
|
||||
// messages on that p2p channel accordingly. The caller must be sure to execute
|
||||
// OnStop to ensure the outbound p2p Channels are closed.
|
||||
//
|
||||
// If fastSync is enabled, we also start the pool and the pool processing
|
||||
// goroutine. If the pool fails to start, an error is returned.
|
||||
func (r *Reactor) OnStart() error {
|
||||
if r.fastSync {
|
||||
if err := r.pool.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
go bcR.poolRoutine(false)
|
||||
|
||||
r.poolWG.Add(1)
|
||||
go r.poolRoutine(false)
|
||||
}
|
||||
|
||||
go r.processBlockchainCh()
|
||||
go r.processPeerUpdates()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
|
||||
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
|
||||
bcR.fastSync = true
|
||||
bcR.initialState = state
|
||||
|
||||
bcR.pool.height = state.LastBlockHeight + 1
|
||||
err := bcR.pool.Start()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
go bcR.poolRoutine(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStop() {
|
||||
if bcR.fastSync {
|
||||
if err := bcR.pool.Stop(); err != nil {
|
||||
bcR.Logger.Error("Error stopping pool", "err", err)
|
||||
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
|
||||
// blocking until they all exit.
|
||||
func (r *Reactor) OnStop() {
|
||||
if r.fastSync {
|
||||
if err := r.pool.Stop(); err != nil {
|
||||
r.Logger.Error("failed to stop pool", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
// wait for the poolRoutine and requestRoutine goroutines to gracefully exit
|
||||
r.poolWG.Wait()
|
||||
|
||||
// Close closeCh to signal to all spawned goroutines to gracefully exit. All
|
||||
// p2p Channels should execute Close().
|
||||
close(r.closeCh)
|
||||
|
||||
// Wait for all p2p Channels to be closed before returning. This ensures we
|
||||
// can easily reason about synchronization of all p2p Channels and ensure no
|
||||
// panics will occur.
|
||||
<-r.blockchainCh.Done()
|
||||
<-r.peerUpdates.Done()
|
||||
}
|
||||
|
||||
// GetChannels implements Reactor
|
||||
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: BlockchainChannel,
|
||||
Priority: 10,
|
||||
SendQueueCapacity: 1000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height()})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
peer.Send(BlockchainChannel, msgBytes)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
// bcStatusResponseMessage from the peer and call pool.SetPeerRange
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor by removing peer from the pool.
|
||||
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
bcR.pool.RemovePeer(peer.ID())
|
||||
}
|
||||
|
||||
// respondToPeer loads a block and sends it to the requesting peer,
|
||||
// if we have it. Otherwise, we'll respond saying we don't have it.
|
||||
func (bcR *BlockchainReactor) respondToPeer(msg *bcproto.BlockRequest,
|
||||
src p2p.Peer) (queued bool) {
|
||||
|
||||
block := bcR.store.LoadBlock(msg.Height)
|
||||
// respondToPeer loads a block and sends it to the requesting peer, if we have it.
|
||||
// Otherwise, we'll respond saying we do not have it.
|
||||
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
|
||||
block := r.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
bl, err := block.ToProto()
|
||||
blockProto, err := block.ToProto()
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
r.Logger.Error("failed to convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bl})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not marshal msg", "err", err)
|
||||
return false
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: peerID,
|
||||
Message: &bcproto.BlockResponse{Block: blockProto},
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
return
|
||||
}
|
||||
|
||||
bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return false
|
||||
r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: peerID,
|
||||
Message: &bcproto.NoBlockResponse{Height: msg.Height},
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
// handleBlockchainMessage handles envelopes sent from peers on the
|
||||
// BlockchainChannel. It returns an error only if the Envelope.Message is unknown
|
||||
// for this channel. This should never be called outside of handleMessage.
|
||||
func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error {
|
||||
logger := r.Logger.With("peer", envelope.From)
|
||||
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
bcR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
switch msg := envelope.Message.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
bcR.respondToPeer(msg, src)
|
||||
r.respondToPeer(msg, envelope.From)
|
||||
|
||||
case *bcproto.BlockResponse:
|
||||
bi, err := types.BlockFromProto(msg.Block)
|
||||
block, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Block content is invalid", "err", err)
|
||||
return
|
||||
logger.Error("failed to convert block from proto", "err", err)
|
||||
return err
|
||||
}
|
||||
bcR.pool.AddBlock(src.ID(), bi, len(msgBytes))
|
||||
|
||||
r.pool.AddBlock(envelope.From, block, block.Size())
|
||||
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Height: bcR.store.Height(),
|
||||
Base: bcR.store.Base(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobut", "err", err)
|
||||
return
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: envelope.From,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Height: r.store.Height(),
|
||||
Base: r.store.Base(),
|
||||
},
|
||||
}
|
||||
src.TrySend(BlockchainChannel, msgBytes)
|
||||
|
||||
case *bcproto.StatusResponse:
|
||||
// Got a peer status. Unverified.
|
||||
bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height)
|
||||
r.pool.SetPeerRange(envelope.From, msg.Base, msg.Height)
|
||||
|
||||
case *bcproto.NoBlockResponse:
|
||||
bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
|
||||
logger.Debug("peer does not have the requested block", "height", msg.Height)
|
||||
|
||||
default:
|
||||
bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
|
||||
return fmt.Errorf("received unknown message: %T", msg)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Handle messages from the poolReactor telling the reactor what to do.
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) {
|
||||
|
||||
trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
|
||||
switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
|
||||
|
||||
blocksSynced := uint64(0)
|
||||
|
||||
chainID := bcR.initialState.ChainID
|
||||
state := bcR.initialState
|
||||
|
||||
lastHundred := time.Now()
|
||||
lastRate := 0.0
|
||||
|
||||
didProcessCh := make(chan struct{}, 1)
|
||||
|
||||
go func() {
|
||||
for {
|
||||
select {
|
||||
case <-bcR.Quit():
|
||||
return
|
||||
case <-bcR.pool.Quit():
|
||||
return
|
||||
case request := <-bcR.requestsCh:
|
||||
peer := bcR.Switch.Peers().Get(request.PeerID)
|
||||
if peer == nil {
|
||||
continue
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: request.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to proto", "err", err)
|
||||
continue
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
|
||||
}
|
||||
case err := <-bcR.errorsCh:
|
||||
peer := bcR.Switch.Peers().Get(err.peerID)
|
||||
if peer != nil {
|
||||
bcR.Switch.StopPeerForError(peer, err)
|
||||
}
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// ask for status updates
|
||||
go bcR.BroadcastStatusRequest() // nolint: errcheck
|
||||
|
||||
}
|
||||
// handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
|
||||
// It will handle errors and any possible panics gracefully. A caller can handle
|
||||
// any error returned by sending a PeerError on the respective channel.
|
||||
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = fmt.Errorf("panic in processing message: %v", e)
|
||||
r.Logger.Error("recovering from processing message panic", "err", err)
|
||||
}
|
||||
}()
|
||||
|
||||
r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From)
|
||||
|
||||
switch chID {
|
||||
case BlockchainChannel:
|
||||
err = r.handleBlockchainMessage(envelope)
|
||||
|
||||
default:
|
||||
err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
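The recover-into-error pattern used by handleMessage above can be exercised on its own; a minimal standalone sketch (names are illustrative, not part of the reactor API):

package main

import "fmt"

// safeHandle converts a panic inside handle into an ordinary error, in the
// same way handleMessage recovers per-envelope processing.
func safeHandle(handle func() error) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
		}
	}()
	return handle()
}

func main() {
	// A handler that panics is reported as an error instead of killing the goroutine.
	err := safeHandle(func() error { panic("boom") })
	fmt.Println(err) // panic in processing message: boom
}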
// processBlockchainCh initiates a blocking process where we listen for and handle
|
||||
// envelopes on the BlockchainChannel. Any error encountered during message
|
||||
// execution will result in a PeerError being sent on the BlockchainChannel. When
|
||||
// the reactor is stopped, we will catch the signal and close the p2p Channel
|
||||
// gracefully.
|
||||
func (r *Reactor) processBlockchainCh() {
|
||||
defer r.blockchainCh.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case envelope := <-r.blockchainCh.In():
|
||||
if err := r.handleMessage(r.blockchainCh.ID(), envelope); err != nil {
|
||||
r.Logger.Error("failed to process message", "ch_id", r.blockchainCh.ID(), "envelope", envelope, "err", err)
|
||||
r.blockchainCh.Error() <- p2p.PeerError{
|
||||
PeerID: envelope.From,
|
||||
Err: err,
|
||||
Severity: p2p.PeerErrorSeverityLow,
|
||||
}
|
||||
}
|
||||
|
||||
case <-r.closeCh:
|
||||
r.Logger.Debug("stopped listening on blockchain channel; closing...")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// processPeerUpdate processes a PeerUpdate.
|
||||
func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
|
||||
r.Logger.Debug("received peer update", "peer", peerUpdate.PeerID, "status", peerUpdate.Status)
|
||||
|
||||
// XXX: Pool#RedoRequest can sometimes give us an empty peer.
|
||||
if len(peerUpdate.PeerID) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
switch peerUpdate.Status {
|
||||
case p2p.PeerStatusNew, p2p.PeerStatusUp:
|
||||
// send a status update the newly added peer
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: peerUpdate.PeerID,
|
||||
Message: &bcproto.StatusResponse{
|
||||
Base: r.store.Base(),
|
||||
Height: r.store.Height(),
|
||||
},
|
||||
}
|
||||
|
||||
case p2p.PeerStatusDown, p2p.PeerStatusRemoved, p2p.PeerStatusBanned:
|
||||
r.pool.RemovePeer(peerUpdate.PeerID)
|
||||
}
|
||||
}
|
||||
|
||||
// processPeerUpdates initiates a blocking process where we listen for and handle
|
||||
// PeerUpdate messages. When the reactor is stopped, we will catch the signal and
|
||||
// close the p2p PeerUpdatesCh gracefully.
|
||||
func (r *Reactor) processPeerUpdates() {
|
||||
defer r.peerUpdates.Close()
|
||||
|
||||
for {
|
||||
select {
|
||||
case peerUpdate := <-r.peerUpdates.Updates():
|
||||
r.processPeerUpdate(peerUpdate)
|
||||
|
||||
case <-r.closeCh:
|
||||
r.Logger.Debug("stopped listening on peer updates channel; closing...")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SwitchToFastSync is called by the state sync reactor when switching to fast
|
||||
// sync.
|
||||
func (r *Reactor) SwitchToFastSync(state sm.State) error {
|
||||
r.fastSync = true
|
||||
r.initialState = state
|
||||
r.pool.height = state.LastBlockHeight + 1
|
||||
|
||||
if err := r.pool.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r.poolWG.Add(1)
|
||||
go r.poolRoutine(true)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *Reactor) requestRoutine() {
|
||||
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
|
||||
defer statusUpdateTicker.Stop()
|
||||
|
||||
r.poolWG.Add(1)
|
||||
defer r.poolWG.Done()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-r.closeCh:
|
||||
return
|
||||
|
||||
case <-r.pool.Quit():
|
||||
return
|
||||
|
||||
case request := <-r.requestsCh:
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
To: request.PeerID,
|
||||
Message: &bcproto.BlockRequest{Height: request.Height},
|
||||
}
|
||||
|
||||
case pErr := <-r.errorsCh:
|
||||
r.blockchainCh.Error() <- p2p.PeerError{
|
||||
PeerID: pErr.peerID,
|
||||
Err: pErr.err,
|
||||
Severity: p2p.PeerErrorSeverityLow,
|
||||
}
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
r.poolWG.Add(1)
|
||||
|
||||
go func() {
|
||||
defer r.poolWG.Done()
|
||||
|
||||
r.blockchainCh.Out() <- p2p.Envelope{
|
||||
Broadcast: true,
|
||||
Message: &bcproto.StatusRequest{},
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// poolRoutine handles messages from the poolReactor telling the reactor what to
|
||||
// do.
|
||||
//
|
||||
// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down!
|
||||
func (r *Reactor) poolRoutine(stateSynced bool) {
|
||||
var (
|
||||
trySyncTicker = time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
switchToConsensusTicker = time.NewTicker(switchToConsensusIntervalSeconds * time.Second)
|
||||
|
||||
blocksSynced = uint64(0)
|
||||
|
||||
chainID = r.initialState.ChainID
|
||||
state = r.initialState
|
||||
|
||||
lastHundred = time.Now()
|
||||
lastRate = 0.0
|
||||
|
||||
didProcessCh = make(chan struct{}, 1)
|
||||
)
|
||||
|
||||
defer trySyncTicker.Stop()
|
||||
defer switchToConsensusTicker.Stop()
|
||||
|
||||
go r.requestRoutine()
|
||||
|
||||
defer r.poolWG.Done()
|
||||
|
||||
FOR_LOOP:
|
||||
for {
|
||||
select {
|
||||
case <-switchToConsensusTicker.C:
|
||||
height, numPending, lenRequesters := bcR.pool.GetStatus()
|
||||
outbound, inbound, _ := bcR.Switch.NumPeers()
|
||||
bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters,
|
||||
"outbound", outbound, "inbound", inbound)
|
||||
if bcR.pool.IsCaughtUp() {
|
||||
bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
|
||||
if err := bcR.pool.Stop(); err != nil {
|
||||
bcR.Logger.Error("Error stopping pool", "err", err)
|
||||
}
|
||||
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
|
||||
if ok {
|
||||
conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
|
||||
}
|
||||
// else {
|
||||
// should only happen during testing
|
||||
// }
|
||||
var (
|
||||
height, numPending, lenRequesters = r.pool.GetStatus()
|
||||
lastAdvance = r.pool.LastAdvance()
|
||||
)
|
||||
|
||||
break FOR_LOOP
|
||||
r.Logger.Debug(
|
||||
"consensus ticker",
|
||||
"num_pending", numPending,
|
||||
"total", lenRequesters,
|
||||
"height", height,
|
||||
)
|
||||
|
||||
switch {
|
||||
case r.pool.IsCaughtUp():
|
||||
r.Logger.Info("switching to consensus reactor", "height", height)
|
||||
|
||||
case time.Since(lastAdvance) > syncTimeout:
|
||||
r.Logger.Error("no progress since last advance", "last_advance", lastAdvance)
|
||||
|
||||
default:
|
||||
r.Logger.Info(
|
||||
"not caught up yet",
|
||||
"height", height,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"timeout_in", syncTimeout-time.Since(lastAdvance),
|
||||
)
|
||||
continue
|
||||
}
|
||||
|
||||
case <-trySyncTicker.C: // chan time
|
||||
if err := r.pool.Stop(); err != nil {
|
||||
r.Logger.Error("failed to stop pool", "err", err)
|
||||
}
|
||||
|
||||
if r.consReactor != nil {
|
||||
r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
|
||||
}
|
||||
|
||||
break FOR_LOOP
|
||||
|
||||
case <-trySyncTicker.C:
|
||||
select {
|
||||
case didProcessCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
|
||||
case <-didProcessCh:
|
||||
// NOTE: It is a subtle mistake to process more than a single block
|
||||
// at a time (e.g. 10) here, because we only TrySend 1 request per
|
||||
// loop. The ratio mismatch can result in starving of blocks, a
|
||||
// sudden burst of requests and responses, and repeat.
|
||||
// Consequently, it is better to split these routines rather than
|
||||
// coupling them as it's written here. TODO uncouple from request
|
||||
// routine.
|
||||
// NOTE: It is a subtle mistake to process more than a single block at a
|
||||
// time (e.g. 10) here, because we only send one BlockRequest per loop
|
||||
// iteration. The ratio mismatch can result in starving of blocks, i.e. a
|
||||
// sudden burst of requests and responses, and repeat. Consequently, it is
|
||||
// better to split these routines rather than coupling them as it is
|
||||
// written here.
|
||||
//
|
||||
// TODO: Uncouple from request routine.
|
||||
|
||||
// See if there are any blocks to sync.
|
||||
first, second := bcR.pool.PeekTwoBlocks()
|
||||
//bcR.Logger.Info("TrySync peeked", "first", first, "second", second)
|
||||
// see if there are any blocks to sync
|
||||
first, second := r.pool.PeekTwoBlocks()
|
||||
if first == nil || second == nil {
|
||||
// We need both to sync the first block.
|
||||
// we need both to sync the first block
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
// Try again quickly next loop.
|
||||
// try again quickly next loop
|
||||
didProcessCh <- struct{}{}
|
||||
}
|
||||
|
||||
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstPartSetHeader := firstParts.Header()
|
||||
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
var (
|
||||
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstPartSetHeader = firstParts.Header()
|
||||
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
)
|
||||
|
||||
// Finally, verify the first block using the second's commit.
|
||||
//
|
||||
// NOTE: We can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err := state.Validators.VerifyCommitLight(
|
||||
chainID, firstID, first.Height, second.LastCommit)
|
||||
err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Error in validation", "err", err)
|
||||
peerID := bcR.pool.RedoRequest(first.Height)
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
if peer != nil {
|
||||
// NOTE: we've already removed the peer's request, but we
|
||||
// still need to clean up the rest.
|
||||
bcR.Switch.StopPeerForError(peer, fmt.Errorf("blockchainReactor validation error: %v", err))
|
||||
err = fmt.Errorf("invalid last commit: %w", err)
|
||||
r.Logger.Error(
|
||||
err.Error(),
|
||||
"last_commit", second.LastCommit,
|
||||
"block_id", firstID,
|
||||
"height", first.Height,
|
||||
)
|
||||
|
||||
// NOTE: We've already removed the peer's request, but we still need
|
||||
// to clean up the rest.
|
||||
peerID := r.pool.RedoRequest(first.Height)
|
||||
r.blockchainCh.Error() <- p2p.PeerError{
|
||||
PeerID: peerID,
|
||||
Err: err,
|
||||
Severity: p2p.PeerErrorSeverityLow,
|
||||
}
|
||||
peerID2 := bcR.pool.RedoRequest(second.Height)
|
||||
peer2 := bcR.Switch.Peers().Get(peerID2)
|
||||
if peer2 != nil && peer2 != peer {
|
||||
// NOTE: we've already removed the peer's request, but we
|
||||
// still need to clean up the rest.
|
||||
bcR.Switch.StopPeerForError(peer2, fmt.Errorf("blockchainReactor validation error: %v", err))
|
||||
|
||||
peerID2 := r.pool.RedoRequest(second.Height)
|
||||
if peerID2 != peerID {
|
||||
r.blockchainCh.Error() <- p2p.PeerError{
|
||||
PeerID: peerID2,
|
||||
Err: err,
|
||||
Severity: p2p.PeerErrorSeverityLow,
|
||||
}
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
} else {
|
||||
bcR.pool.PopRequest()
|
||||
r.pool.PopRequest()
|
||||
|
||||
// TODO: batch saves so we dont persist to disk every block
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
// TODO: batch saves so we do not persist to disk every block
|
||||
r.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
// TODO: same thing for app - but we would need a way to
|
||||
// get the hash without persisting the state
|
||||
var err error
|
||||
state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first)
|
||||
|
||||
// TODO: Same thing for app - but we would need a way to get the hash
|
||||
// without persisting the state.
|
||||
state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
|
||||
if err != nil {
|
||||
// TODO This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
// TODO: This is bad, are we zombie?
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
|
||||
blocksSynced++
|
||||
|
||||
if blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height,
|
||||
"max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate)
|
||||
r.Logger.Info(
|
||||
"fast sync rate",
|
||||
"height", r.pool.height,
|
||||
"max_peer_height", r.pool.MaxPeerHeight(),
|
||||
"blocks/s", lastRate,
|
||||
)
|
||||
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
|
||||
continue FOR_LOOP
|
||||
|
||||
case <-bcR.Quit():
|
||||
case <-r.closeCh:
|
||||
break FOR_LOOP
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// BroadcastStatusRequest broadcasts `BlockStore` base and height.
|
||||
func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
|
||||
bm, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to proto", "err", err)
|
||||
return fmt.Errorf("could not convert msg to proto: %w", err)
|
||||
}
|
||||
|
||||
bcR.Switch.Broadcast(BlockchainChannel, bm)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -2,71 +2,59 @@ package v0
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/mempool/mock"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/proxy"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
tmtime "github.com/tendermint/tendermint/types/time"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
var config *cfg.Config
|
||||
var rng = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
|
||||
validators := make([]types.GenesisValidator, numValidators)
|
||||
privValidators := make([]types.PrivValidator, numValidators)
|
||||
for i := 0; i < numValidators; i++ {
|
||||
val, privVal := types.RandValidator(randPower, minPower)
|
||||
validators[i] = types.GenesisValidator{
|
||||
PubKey: val.PubKey,
|
||||
Power: val.VotingPower,
|
||||
}
|
||||
privValidators[i] = privVal
|
||||
}
|
||||
sort.Sort(types.PrivValidatorsByAddress(privValidators))
|
||||
|
||||
return &types.GenesisDoc{
|
||||
GenesisTime: tmtime.Now(),
|
||||
ChainID: config.ChainID(),
|
||||
Validators: validators,
|
||||
}, privValidators
|
||||
}
|
||||
|
||||
type BlockchainReactorPair struct {
|
||||
reactor *BlockchainReactor
|
||||
type reactorTestSuite struct {
|
||||
reactor *Reactor
|
||||
app proxy.AppConns
|
||||
|
||||
peerID p2p.NodeID
|
||||
|
||||
blockchainChannel *p2p.Channel
|
||||
blockchainInCh chan p2p.Envelope
|
||||
blockchainOutCh chan p2p.Envelope
|
||||
blockchainPeerErrCh chan p2p.PeerError
|
||||
|
||||
peerUpdatesCh chan p2p.PeerUpdate
|
||||
peerUpdates *p2p.PeerUpdatesCh
|
||||
}
|
||||
|
||||
func newBlockchainReactor(
|
||||
logger log.Logger,
|
||||
func setup(
|
||||
t *testing.T,
|
||||
genDoc *types.GenesisDoc,
|
||||
privVals []types.PrivValidator,
|
||||
maxBlockHeight int64) BlockchainReactorPair {
|
||||
if len(privVals) != 1 {
|
||||
panic("only support one validator")
|
||||
}
|
||||
maxBlockHeight int64,
|
||||
chBuf uint,
|
||||
) *reactorTestSuite {
|
||||
t.Helper()
|
||||
|
||||
app := &testApp{}
|
||||
require.Len(t, privVals, 1, "only one validator can be supported")
|
||||
|
||||
app := &abci.BaseApplication{}
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
|
||||
proxyApp := proxy.NewAppConns(cc)
|
||||
err := proxyApp.Start()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error start app: %w", err))
|
||||
}
|
||||
require.NoError(t, proxyApp.Start())
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
@@ -74,25 +62,24 @@ func newBlockchainReactor(
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make the BlockchainReactor itself.
|
||||
// NOTE we have to create and commit the blocks first because
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db)
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// let's add some blocks in
|
||||
blockExec := sm.NewBlockExecutor(
|
||||
stateStore,
|
||||
log.TestingLogger(),
|
||||
proxyApp.Consensus(),
|
||||
mock.Mempool{},
|
||||
sm.EmptyEvidencePool{},
|
||||
)
|
||||
require.NoError(t, stateStore.Save(state))
|
||||
|
||||
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
|
||||
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
|
||||
|
||||
if blockHeight > 1 {
|
||||
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
|
||||
lastBlock := blockStore.LoadBlock(blockHeight - 1)
|
||||
@@ -105,60 +92,197 @@ func newBlockchainReactor(
|
||||
lastBlock.Header.ChainID,
|
||||
time.Now(),
|
||||
)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
lastCommit = types.NewCommit(vote.Height, vote.Round,
|
||||
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
|
||||
require.NoError(t, err)
|
||||
|
||||
lastCommit = types.NewCommit(
|
||||
vote.Height,
|
||||
vote.Round,
|
||||
lastBlockMeta.BlockID,
|
||||
[]types.CommitSig{vote.CommitSig()},
|
||||
)
|
||||
}
|
||||
|
||||
thisBlock := makeBlock(blockHeight, state, lastCommit)
|
||||
|
||||
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
|
||||
|
||||
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error apply block: %w", err))
|
||||
}
|
||||
require.NoError(t, err)
|
||||
|
||||
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
|
||||
}
|
||||
|
||||
bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
|
||||
bcReactor.SetLogger(logger.With("module", "blockchain"))
|
||||
pID := make([]byte, 16)
|
||||
_, err = rng.Read(pID)
|
||||
require.NoError(t, err)
|
||||
|
||||
return BlockchainReactorPair{bcReactor, proxyApp}
|
||||
peerUpdatesCh := make(chan p2p.PeerUpdate, chBuf)
|
||||
|
||||
rts := &reactorTestSuite{
|
||||
app: proxyApp,
|
||||
blockchainInCh: make(chan p2p.Envelope, chBuf),
|
||||
blockchainOutCh: make(chan p2p.Envelope, chBuf),
|
||||
blockchainPeerErrCh: make(chan p2p.PeerError, chBuf),
|
||||
peerUpdatesCh: peerUpdatesCh,
|
||||
peerUpdates: p2p.NewPeerUpdates(peerUpdatesCh),
|
||||
peerID: p2p.NodeID(fmt.Sprintf("%x", pID)),
|
||||
}
|
||||
|
||||
rts.blockchainChannel = p2p.NewChannel(
|
||||
BlockchainChannel,
|
||||
new(bcproto.Message),
|
||||
rts.blockchainInCh,
|
||||
rts.blockchainOutCh,
|
||||
rts.blockchainPeerErrCh,
|
||||
)
|
||||
|
||||
reactor, err := NewReactor(
|
||||
log.TestingLogger().With("module", "blockchain", "node", rts.peerID),
|
||||
state.Copy(),
|
||||
blockExec,
|
||||
blockStore,
|
||||
nil,
|
||||
rts.blockchainChannel,
|
||||
rts.peerUpdates,
|
||||
fastSync,
|
||||
)
|
||||
|
||||
require.NoError(t, err)
|
||||
rts.reactor = reactor
|
||||
|
||||
require.NoError(t, rts.reactor.Start())
|
||||
require.True(t, rts.reactor.IsRunning())
|
||||
|
||||
t.Cleanup(func() {
|
||||
require.NoError(t, rts.reactor.Stop())
|
||||
require.NoError(t, rts.app.Stop())
|
||||
require.False(t, rts.reactor.IsRunning())
|
||||
})
|
||||
|
||||
return rts
|
||||
}
|
||||
|
||||
func TestNoBlockResponse(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
func simulateRouter(primary *reactorTestSuite, suites []*reactorTestSuite, dropChErr bool) {
|
||||
// create a mapping for efficient suite lookup by peer ID
|
||||
suitesByPeerID := make(map[p2p.NodeID]*reactorTestSuite)
|
||||
for _, suite := range suites {
|
||||
suitesByPeerID[suite.peerID] = suite
|
||||
}
|
||||
|
||||
maxBlockHeight := int64(65)
|
||||
|
||||
reactorPairs := make([]BlockchainReactorPair, 2)
|
||||
|
||||
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
|
||||
p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
defer func() {
|
||||
for _, r := range reactorPairs {
|
||||
err := r.reactor.Stop()
|
||||
require.NoError(t, err)
|
||||
err = r.app.Stop()
|
||||
require.NoError(t, err)
|
||||
// Simulate a router by listening for all outbound envelopes and proxying the
|
||||
// envelope to the respective peer (suite).
|
||||
go func() {
|
||||
for envelope := range primary.blockchainOutCh {
|
||||
if envelope.Broadcast {
|
||||
for _, s := range suites {
|
||||
// broadcast to everyone except source
|
||||
if s.peerID != primary.peerID {
|
||||
s.blockchainInCh <- p2p.Envelope{
|
||||
From: primary.peerID,
|
||||
To: s.peerID,
|
||||
Message: envelope.Message,
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
suitesByPeerID[envelope.To].blockchainInCh <- p2p.Envelope{
|
||||
From: primary.peerID,
|
||||
To: envelope.To,
|
||||
Message: envelope.Message,
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
tests := []struct {
|
||||
go func() {
|
||||
for pErr := range primary.blockchainPeerErrCh {
|
||||
if dropChErr {
|
||||
primary.reactor.Logger.Debug("dropped peer error", "err", pErr.Err)
|
||||
} else {
|
||||
primary.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
PeerID: pErr.PeerID,
|
||||
Status: p2p.PeerStatusRemoved,
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func TestReactor_AbruptDisconnect(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
|
||||
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
|
||||
maxBlockHeight := int64(64)
|
||||
testSuites := []*reactorTestSuite{
|
||||
setup(t, genDoc, privVals, maxBlockHeight, 0),
|
||||
setup(t, genDoc, privVals, 0, 0),
|
||||
}
|
||||
|
||||
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
|
||||
|
||||
for _, s := range testSuites {
|
||||
simulateRouter(s, testSuites, true)
|
||||
|
||||
// connect reactor to every other reactor
|
||||
for _, ss := range testSuites {
|
||||
if s.peerID != ss.peerID {
|
||||
s.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusUp,
|
||||
PeerID: ss.peerID,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
secondaryPool := testSuites[1].reactor.pool
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool {
|
||||
height, _, _ := secondaryPool.GetStatus()
|
||||
return secondaryPool.MaxPeerHeight() > 0 && height > 0 && height < 10
|
||||
},
|
||||
10*time.Second,
|
||||
10*time.Millisecond,
|
||||
"expected node to be partially synced",
|
||||
)
|
||||
|
||||
// Remove synced node from the syncing node which should not result in any
|
||||
// deadlocks or race conditions within the context of poolRoutine.
|
||||
testSuites[1].peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusDown,
|
||||
PeerID: testSuites[0].peerID,
|
||||
}
|
||||
}
|
||||
|
||||
func TestReactor_NoBlockResponse(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
|
||||
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
|
||||
maxBlockHeight := int64(65)
|
||||
testSuites := []*reactorTestSuite{
|
||||
setup(t, genDoc, privVals, maxBlockHeight, 0),
|
||||
setup(t, genDoc, privVals, 0, 0),
|
||||
}
|
||||
|
||||
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
|
||||
|
||||
for _, s := range testSuites {
|
||||
simulateRouter(s, testSuites, true)
|
||||
|
||||
// connect reactor to every other reactor
|
||||
for _, ss := range testSuites {
|
||||
if s.peerID != ss.peerID {
|
||||
s.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusUp,
|
||||
PeerID: ss.peerID,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
testCases := []struct {
|
||||
height int64
|
||||
existent bool
|
||||
}{
|
||||
@@ -168,161 +292,114 @@ func TestNoBlockResponse(t *testing.T) {
|
||||
{100, false},
|
||||
}
|
||||
|
||||
for {
|
||||
if reactorPairs[1].reactor.pool.IsCaughtUp() {
|
||||
break
|
||||
}
|
||||
secondaryPool := testSuites[1].reactor.pool
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool { return secondaryPool.MaxPeerHeight() > 0 && secondaryPool.IsCaughtUp() },
|
||||
10*time.Second,
|
||||
10*time.Millisecond,
|
||||
"expected node to be fully synced",
|
||||
)
|
||||
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
assert.Equal(t, maxBlockHeight, reactorPairs[0].reactor.store.Height())
|
||||
|
||||
for _, tt := range tests {
|
||||
block := reactorPairs[1].reactor.store.LoadBlock(tt.height)
|
||||
if tt.existent {
|
||||
assert.True(t, block != nil)
|
||||
for _, tc := range testCases {
|
||||
block := testSuites[1].reactor.store.LoadBlock(tc.height)
|
||||
if tc.existent {
|
||||
require.True(t, block != nil)
|
||||
} else {
|
||||
assert.True(t, block == nil)
|
||||
require.Nil(t, block)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: This is too hard to test without
|
||||
// an easy way to add test peer to switch
|
||||
// or without significant refactoring of the module.
|
||||
// Alternatively we could actually dial a TCP conn but
|
||||
// that seems extreme.
|
||||
func TestBadBlockStopsPeer(t *testing.T) {
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
func TestReactor_BadBlockStopsPeer(t *testing.T) {
|
||||
config := cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
|
||||
maxBlockHeight := int64(148)
|
||||
maxBlockHeight := int64(48)
|
||||
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
|
||||
|
||||
// Other chain needs a different validator set
|
||||
otherGenDoc, otherPrivVals := randGenesisDoc(1, false, 30)
|
||||
otherChain := newBlockchainReactor(log.TestingLogger(), otherGenDoc, otherPrivVals, maxBlockHeight)
|
||||
testSuites := []*reactorTestSuite{
|
||||
setup(t, genDoc, privVals, maxBlockHeight, 1000), // fully synced node
|
||||
setup(t, genDoc, privVals, 0, 1000),
|
||||
setup(t, genDoc, privVals, 0, 1000),
|
||||
setup(t, genDoc, privVals, 0, 1000),
|
||||
setup(t, genDoc, privVals, 0, 1000), // new node
|
||||
}
|
||||
|
||||
defer func() {
|
||||
err := otherChain.reactor.Stop()
|
||||
require.Error(t, err)
|
||||
err = otherChain.app.Stop()
|
||||
require.NoError(t, err)
|
||||
}()
|
||||
require.Equal(t, maxBlockHeight, testSuites[0].reactor.store.Height())
|
||||
|
||||
reactorPairs := make([]BlockchainReactorPair, 4)
|
||||
for _, s := range testSuites[:len(testSuites)-1] {
|
||||
simulateRouter(s, testSuites, true)
|
||||
|
||||
reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[2] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs[3] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
|
||||
switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
defer func() {
|
||||
for _, r := range reactorPairs {
|
||||
err := r.reactor.Stop()
|
||||
require.NoError(t, err)
|
||||
|
||||
err = r.app.Stop()
|
||||
require.NoError(t, err)
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
caughtUp := true
|
||||
for _, r := range reactorPairs {
|
||||
if !r.reactor.pool.IsCaughtUp() {
|
||||
caughtUp = false
|
||||
// connect reactor to every other reactor except the new node
|
||||
for _, ss := range testSuites[:len(testSuites)-1] {
|
||||
if s.peerID != ss.peerID {
|
||||
s.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusUp,
|
||||
PeerID: ss.peerID,
|
||||
}
|
||||
}
|
||||
}
|
||||
if caughtUp {
|
||||
break
|
||||
}
|
||||
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool {
|
||||
caughtUp := true
|
||||
for _, s := range testSuites[1 : len(testSuites)-1] {
|
||||
if s.reactor.pool.MaxPeerHeight() == 0 || !s.reactor.pool.IsCaughtUp() {
|
||||
caughtUp = false
|
||||
}
|
||||
}
|
||||
|
||||
return caughtUp
|
||||
},
|
||||
10*time.Minute,
|
||||
10*time.Millisecond,
|
||||
"expected all nodes to be fully synced",
|
||||
)
|
||||
|
||||
for _, s := range testSuites[:len(testSuites)-1] {
|
||||
require.Len(t, s.reactor.pool.peers, 3)
|
||||
}
|
||||
|
||||
// Mark testSuites[3] as an invalid peer which will cause newSuite to disconnect
|
||||
// from this peer.
|
||||
otherGenDoc, otherPrivVals := randGenesisDoc(config, 1, false, 30)
|
||||
otherSuite := setup(t, otherGenDoc, otherPrivVals, maxBlockHeight, 0)
|
||||
testSuites[3].reactor.store = otherSuite.reactor.store
|
||||
|
||||
// add a fake peer just so we do not wait for the consensus ticker to time out
|
||||
otherSuite.reactor.pool.SetPeerRange("00ff", 10, 10)
|
||||
|
||||
// start the new peer's faux router
|
||||
newSuite := testSuites[len(testSuites)-1]
|
||||
simulateRouter(newSuite, testSuites, false)
|
||||
|
||||
// connect all nodes to the new peer
|
||||
for _, s := range testSuites[:len(testSuites)-1] {
|
||||
newSuite.peerUpdatesCh <- p2p.PeerUpdate{
|
||||
Status: p2p.PeerStatusUp,
|
||||
PeerID: s.peerID,
|
||||
}
|
||||
}
|
||||
|
||||
// at this time, reactors[0-3] are the newest
|
||||
assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size())
|
||||
// wait for the new peer to catch up and become fully synced
|
||||
require.Eventually(
|
||||
t,
|
||||
func() bool { return newSuite.reactor.pool.MaxPeerHeight() > 0 && newSuite.reactor.pool.IsCaughtUp() },
|
||||
10*time.Minute,
|
||||
10*time.Millisecond,
|
||||
"expected new node to be fully synced",
|
||||
)
|
||||
|
||||
// Mark reactorPairs[3] as an invalid peer. Fiddling with .store without a mutex is a data
|
||||
// race, but can't be easily avoided.
|
||||
reactorPairs[3].reactor.store = otherChain.reactor.store
|
||||
|
||||
lastReactorPair := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs = append(reactorPairs, lastReactorPair)
|
||||
|
||||
switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)...)
|
||||
|
||||
for i := 0; i < len(reactorPairs)-1; i++ {
|
||||
p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
for {
|
||||
if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// utility funcs
|
||||
|
||||
func makeTxs(height int64) (txs []types.Tx) {
|
||||
for i := 0; i < 10; i++ {
|
||||
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
|
||||
type testApp struct {
|
||||
abci.BaseApplication
|
||||
}
|
||||
|
||||
var _ abci.Application = (*testApp)(nil)
|
||||
|
||||
func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
|
||||
return abci.ResponseInfo{}
|
||||
}
|
||||
|
||||
func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
|
||||
return abci.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
|
||||
return abci.ResponseEndBlock{}
|
||||
}
|
||||
|
||||
func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
|
||||
return abci.ResponseDeliverTx{Events: []abci.Event{}}
|
||||
}
|
||||
|
||||
func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
|
||||
return abci.ResponseCheckTx{}
|
||||
}
|
||||
|
||||
func (app *testApp) Commit() abci.ResponseCommit {
|
||||
return abci.ResponseCommit{}
|
||||
}
|
||||
|
||||
func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
|
||||
return
|
||||
require.Eventuallyf(
|
||||
t,
|
||||
func() bool { return len(newSuite.reactor.pool.peers) < len(testSuites)-1 },
|
||||
10*time.Minute,
|
||||
10*time.Millisecond,
|
||||
"invalid number of peers; expected < %d, got: %d",
|
||||
len(testSuites)-1,
|
||||
len(newSuite.reactor.pool.peers),
|
||||
)
|
||||
}
|
||||
|
||||
50
blockchain/v0/test_util.go
Normal file
@@ -0,0 +1,50 @@
package v0

import (
	"sort"

	cfg "github.com/tendermint/tendermint/config"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

func randGenesisDoc(
	config *cfg.Config,
	numValidators int,
	randPower bool,
	minPower int64,
) (*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)

	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}

		privValidators[i] = privVal
	}

	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     config.ChainID(),
		Validators:  validators,
	}, privValidators
}

func makeTxs(height int64) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
	}
	return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
	return block
}
@@ -1,211 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"time"
|
||||
|
||||
flow "github.com/tendermint/tendermint/libs/flowrate"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
//--------
|
||||
// Peer
|
||||
|
||||
// BpPeerParams stores the peer parameters that are used when creating a peer.
|
||||
type BpPeerParams struct {
|
||||
timeout time.Duration
|
||||
minRecvRate int64
|
||||
sampleRate time.Duration
|
||||
windowSize time.Duration
|
||||
}
|
||||
|
||||
// BpPeer is the datastructure associated with a fast sync peer.
|
||||
type BpPeer struct {
|
||||
logger log.Logger
|
||||
ID p2p.ID
|
||||
|
||||
Base int64 // the peer reported base
|
||||
Height int64 // the peer reported height
|
||||
NumPendingBlockRequests int // number of requests still waiting for block responses
|
||||
blocks map[int64]*types.Block // blocks received or expected to be received from this peer
|
||||
blockResponseTimer *time.Timer
|
||||
recvMonitor *flow.Monitor
|
||||
params *BpPeerParams // parameters for timer and monitor
|
||||
|
||||
onErr func(err error, peerID p2p.ID) // function to call on error
|
||||
}
|
||||
|
||||
// NewBpPeer creates a new peer.
|
||||
func NewBpPeer(peerID p2p.ID, base int64, height int64,
|
||||
onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer {
|
||||
|
||||
if params == nil {
|
||||
params = BpPeerDefaultParams()
|
||||
}
|
||||
return &BpPeer{
|
||||
ID: peerID,
|
||||
Base: base,
|
||||
Height: height,
|
||||
blocks: make(map[int64]*types.Block, maxRequestsPerPeer),
|
||||
logger: log.NewNopLogger(),
|
||||
onErr: onErr,
|
||||
params: params,
|
||||
}
|
||||
}
|
||||
|
||||
// String returns a string representation of a peer.
|
||||
func (peer *BpPeer) String() string {
|
||||
return fmt.Sprintf("peer: %v height: %v pending: %v", peer.ID, peer.Height, peer.NumPendingBlockRequests)
|
||||
}
|
||||
|
||||
// SetLogger sets the logger of the peer.
|
||||
func (peer *BpPeer) SetLogger(l log.Logger) {
|
||||
peer.logger = l
|
||||
}
|
||||
|
||||
// Cleanup performs cleanup of the peer: it removes blocks and requests, and stops the timer and monitor.
|
||||
func (peer *BpPeer) Cleanup() {
|
||||
if peer.blockResponseTimer != nil {
|
||||
peer.blockResponseTimer.Stop()
|
||||
}
|
||||
if peer.NumPendingBlockRequests != 0 {
|
||||
peer.logger.Info("peer with pending requests is being cleaned", "peer", peer.ID)
|
||||
}
|
||||
if len(peer.blocks)-peer.NumPendingBlockRequests != 0 {
|
||||
peer.logger.Info("peer with pending blocks is being cleaned", "peer", peer.ID)
|
||||
}
|
||||
for h := range peer.blocks {
|
||||
delete(peer.blocks, h)
|
||||
}
|
||||
peer.NumPendingBlockRequests = 0
|
||||
peer.recvMonitor = nil
|
||||
}
|
||||
|
||||
// BlockAtHeight returns the block at a given height if available and errMissingBlock otherwise.
|
||||
func (peer *BpPeer) BlockAtHeight(height int64) (*types.Block, error) {
|
||||
block, ok := peer.blocks[height]
|
||||
if !ok {
|
||||
return nil, errMissingBlock
|
||||
}
|
||||
if block == nil {
|
||||
return nil, errMissingBlock
|
||||
}
|
||||
return peer.blocks[height], nil
|
||||
}
|
||||
|
||||
// AddBlock adds a block at the peer level. The block must be non-nil and recvSize a non-negative integer.
|
||||
// The peer must have a pending request for this block.
|
||||
func (peer *BpPeer) AddBlock(block *types.Block, recvSize int) error {
|
||||
if block == nil || recvSize < 0 {
|
||||
panic("bad parameters")
|
||||
}
|
||||
existingBlock, ok := peer.blocks[block.Height]
|
||||
if !ok {
|
||||
peer.logger.Error("unsolicited block", "blockHeight", block.Height, "peer", peer.ID)
|
||||
return errMissingBlock
|
||||
}
|
||||
if existingBlock != nil {
|
||||
peer.logger.Error("already have a block for height", "height", block.Height)
|
||||
return errDuplicateBlock
|
||||
}
|
||||
if peer.NumPendingBlockRequests == 0 {
|
||||
panic("peer does not have pending requests")
|
||||
}
|
||||
peer.blocks[block.Height] = block
|
||||
peer.NumPendingBlockRequests--
|
||||
if peer.NumPendingBlockRequests == 0 {
|
||||
peer.stopMonitor()
|
||||
peer.stopBlockResponseTimer()
|
||||
} else {
|
||||
peer.recvMonitor.Update(recvSize)
|
||||
peer.resetBlockResponseTimer()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveBlock removes the block of given height
|
||||
func (peer *BpPeer) RemoveBlock(height int64) {
|
||||
delete(peer.blocks, height)
|
||||
}
|
||||
|
||||
// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
|
||||
func (peer *BpPeer) RequestSent(height int64) {
|
||||
peer.blocks[height] = nil
|
||||
|
||||
if peer.NumPendingBlockRequests == 0 {
|
||||
peer.startMonitor()
|
||||
peer.resetBlockResponseTimer()
|
||||
}
|
||||
peer.NumPendingBlockRequests++
|
||||
}
|
||||
|
||||
// CheckRate verifies that the response rate of the peer is acceptable (higher than the minimum allowed).
|
||||
func (peer *BpPeer) CheckRate() error {
|
||||
if peer.NumPendingBlockRequests == 0 {
|
||||
return nil
|
||||
}
|
||||
curRate := peer.recvMonitor.Status().CurRate
|
||||
// curRate can be 0 on start
|
||||
if curRate != 0 && curRate < peer.params.minRecvRate {
|
||||
err := errSlowPeer
|
||||
peer.logger.Error("SendTimeout", "peer", peer,
|
||||
"reason", err,
|
||||
"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
|
||||
"minRate", fmt.Sprintf("%d KB/s", peer.params.minRecvRate/1024))
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (peer *BpPeer) onTimeout() {
|
||||
peer.onErr(errNoPeerResponse, peer.ID)
|
||||
}
|
||||
|
||||
func (peer *BpPeer) stopMonitor() {
|
||||
peer.recvMonitor.Done()
|
||||
peer.recvMonitor = nil
|
||||
}
|
||||
|
||||
func (peer *BpPeer) startMonitor() {
|
||||
peer.recvMonitor = flow.New(peer.params.sampleRate, peer.params.windowSize)
|
||||
initialValue := float64(peer.params.minRecvRate) * math.E
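// seed the moving average at e (~2.7x) times minRecvRate so a freshly
// connected peer starts above the slow-peer threshold used by CheckRate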
|
||||
peer.recvMonitor.SetREMA(initialValue)
|
||||
}
|
||||
|
||||
func (peer *BpPeer) resetBlockResponseTimer() {
|
||||
if peer.blockResponseTimer == nil {
|
||||
peer.blockResponseTimer = time.AfterFunc(peer.params.timeout, peer.onTimeout)
|
||||
} else {
|
||||
peer.blockResponseTimer.Reset(peer.params.timeout)
|
||||
}
|
||||
}
|
||||
|
||||
func (peer *BpPeer) stopBlockResponseTimer() bool {
|
||||
if peer.blockResponseTimer == nil {
|
||||
return false
|
||||
}
|
||||
return peer.blockResponseTimer.Stop()
|
||||
}
|
||||
|
||||
// BpPeerDefaultParams returns the default peer parameters.
|
||||
func BpPeerDefaultParams() *BpPeerParams {
|
||||
return &BpPeerParams{
|
||||
// Timeout for a peer to respond to a block request.
|
||||
timeout: 15 * time.Second,
|
||||
|
||||
// Minimum recv rate to ensure we're receiving blocks from a peer fast
|
||||
// enough. If a peer is not sending data at least at that rate, we
|
||||
// consider them to have timed out and we disconnect.
|
||||
//
|
||||
// Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s,
|
||||
// sending data across the Atlantic ~ 7.5 KB/s.
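// 7.5 KB/s * 1024 = 7680 bytes/s, which is the value used below.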
|
||||
minRecvRate: int64(7680),
|
||||
|
||||
// Monitor parameters
|
||||
sampleRate: time.Second,
|
||||
windowSize: 40 * time.Second,
|
||||
}
|
||||
}
|
||||
@@ -1,280 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
func TestPeerMonitor(t *testing.T) {
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(tmrand.Str(12)), 0, 10,
|
||||
func(err error, _ p2p.ID) {},
|
||||
nil)
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
peer.startMonitor()
|
||||
assert.NotNil(t, peer.recvMonitor)
|
||||
peer.stopMonitor()
|
||||
assert.Nil(t, peer.recvMonitor)
|
||||
}
|
||||
|
||||
func TestPeerResetBlockResponseTimer(t *testing.T) {
|
||||
var (
|
||||
numErrFuncCalls int // number of calls to the errFunc
|
||||
lastErr error // last generated error
|
||||
peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine
|
||||
)
|
||||
params := &BpPeerParams{timeout: 20 * time.Millisecond}
|
||||
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(tmrand.Str(12)), 0, 10,
|
||||
func(err error, _ p2p.ID) {
|
||||
peerTestMtx.Lock()
|
||||
defer peerTestMtx.Unlock()
|
||||
lastErr = err
|
||||
numErrFuncCalls++
|
||||
},
|
||||
params)
|
||||
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
checkByStoppingPeerTimer(t, peer, false)
|
||||
|
||||
// initial reset call with peer having a nil timer
|
||||
peer.resetBlockResponseTimer()
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
// make sure timer is running and stop it
|
||||
checkByStoppingPeerTimer(t, peer, true)
|
||||
|
||||
// reset with running timer
|
||||
peer.resetBlockResponseTimer()
|
||||
time.Sleep(5 * time.Millisecond)
|
||||
peer.resetBlockResponseTimer()
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
|
||||
// let the timer expire and ...
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
// ... check timer is not running
|
||||
checkByStoppingPeerTimer(t, peer, false)
|
||||
|
||||
peerTestMtx.Lock()
|
||||
// ... check errNoPeerResponse has been sent
|
||||
assert.Equal(t, 1, numErrFuncCalls)
|
||||
assert.Equal(t, lastErr, errNoPeerResponse)
|
||||
peerTestMtx.Unlock()
|
||||
}
|
||||
|
||||
func TestPeerRequestSent(t *testing.T) {
|
||||
params := &BpPeerParams{timeout: 2 * time.Millisecond}
|
||||
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(tmrand.Str(12)), 0, 10,
|
||||
func(err error, _ p2p.ID) {},
|
||||
params)
|
||||
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
|
||||
peer.RequestSent(1)
|
||||
assert.NotNil(t, peer.recvMonitor)
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
assert.Equal(t, 1, peer.NumPendingBlockRequests)
|
||||
|
||||
peer.RequestSent(1)
|
||||
assert.NotNil(t, peer.recvMonitor)
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
assert.Equal(t, 2, peer.NumPendingBlockRequests)
|
||||
}
|
||||
|
||||
func TestPeerGetAndRemoveBlock(t *testing.T) {
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(tmrand.Str(12)), 0, 100,
|
||||
func(err error, _ p2p.ID) {},
|
||||
nil)
|
||||
|
||||
// Change peer height
|
||||
peer.Height = int64(10)
|
||||
assert.Equal(t, int64(10), peer.Height)
|
||||
|
||||
// request some blocks and receive few of them
|
||||
for i := 1; i <= 10; i++ {
|
||||
peer.RequestSent(int64(i))
|
||||
if i > 5 {
|
||||
// only receive blocks 1..5
|
||||
continue
|
||||
}
|
||||
_ = peer.AddBlock(makeSmallBlock(i), 10)
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
height int64
|
||||
wantErr error
|
||||
blockPresent bool
|
||||
}{
|
||||
{"no request", 100, errMissingBlock, false},
|
||||
{"no block", 6, errMissingBlock, false},
|
||||
{"block 1 present", 1, nil, true},
|
||||
{"block max present", 5, nil, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// try to get the block
|
||||
b, err := peer.BlockAtHeight(tt.height)
|
||||
assert.Equal(t, tt.wantErr, err)
|
||||
assert.Equal(t, tt.blockPresent, b != nil)
|
||||
|
||||
// remove the block
|
||||
peer.RemoveBlock(tt.height)
|
||||
_, err = peer.BlockAtHeight(tt.height)
|
||||
assert.Equal(t, errMissingBlock, err)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPeerAddBlock(t *testing.T) {
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(tmrand.Str(12)), 0, 100,
|
||||
func(err error, _ p2p.ID) {},
|
||||
nil)
|
||||
|
||||
// request some blocks, receive one
|
||||
for i := 1; i <= 10; i++ {
|
||||
peer.RequestSent(int64(i))
|
||||
if i == 5 {
|
||||
// receive block 5
|
||||
_ = peer.AddBlock(makeSmallBlock(i), 10)
|
||||
}
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
height int64
|
||||
wantErr error
|
||||
blockPresent bool
|
||||
}{
|
||||
{"no request", 50, errMissingBlock, false},
|
||||
{"duplicate block", 5, errDuplicateBlock, true},
|
||||
{"block 1 successfully received", 1, nil, true},
|
||||
{"block max successfully received", 10, nil, true},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// try to get the block
|
||||
err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10)
|
||||
assert.Equal(t, tt.wantErr, err)
|
||||
_, err = peer.BlockAtHeight(tt.height)
|
||||
assert.Equal(t, tt.blockPresent, err == nil)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) {
|
||||
|
||||
params := &BpPeerParams{timeout: 10 * time.Millisecond}
|
||||
var (
|
||||
numErrFuncCalls int // number of calls to the onErr function
|
||||
lastErr error // last generated error
|
||||
peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine
|
||||
)
|
||||
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(tmrand.Str(12)), 0, 10,
|
||||
func(err error, _ p2p.ID) {
|
||||
peerTestMtx.Lock()
|
||||
defer peerTestMtx.Unlock()
|
||||
lastErr = err
|
||||
numErrFuncCalls++
|
||||
},
|
||||
params)
|
||||
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
|
||||
peer.RequestSent(1)
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
// timer should have expired by now, check that the on error function was called
|
||||
peerTestMtx.Lock()
|
||||
assert.Equal(t, 1, numErrFuncCalls)
|
||||
assert.Equal(t, errNoPeerResponse, lastErr)
|
||||
peerTestMtx.Unlock()
|
||||
}
|
||||
|
||||
func TestPeerCheckRate(t *testing.T) {
|
||||
params := &BpPeerParams{
|
||||
timeout: time.Second,
|
||||
minRecvRate: int64(100), // 100 bytes/sec exponential moving average
|
||||
}
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(tmrand.Str(12)), 0, 10,
|
||||
func(err error, _ p2p.ID) {},
|
||||
params)
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
|
||||
require.Nil(t, peer.CheckRate())
|
||||
|
||||
for i := 0; i < 40; i++ {
|
||||
peer.RequestSent(int64(i))
|
||||
}
|
||||
|
||||
// monitor starts with a higher rEMA (~ 2*minRecvRate), wait for it to go down
|
||||
time.Sleep(900 * time.Millisecond)
|
||||
|
||||
// normal peer - send a bit more than 100 bytes/sec, > 10 bytes/100msec, check peer is not considered slow
|
||||
for i := 0; i < 10; i++ {
|
||||
_ = peer.AddBlock(makeSmallBlock(i), 11)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
require.Nil(t, peer.CheckRate())
|
||||
}
|
||||
|
||||
// slow peer - send a bit less than 10 bytes/100msec
|
||||
for i := 10; i < 20; i++ {
|
||||
_ = peer.AddBlock(makeSmallBlock(i), 9)
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
}
|
||||
// check peer is considered slow
|
||||
assert.Equal(t, errSlowPeer, peer.CheckRate())
|
||||
}
|
||||
|
||||
func TestPeerCleanup(t *testing.T) {
|
||||
params := &BpPeerParams{timeout: 2 * time.Millisecond}
|
||||
|
||||
peer := NewBpPeer(
|
||||
p2p.ID(tmrand.Str(12)), 0, 10,
|
||||
func(err error, _ p2p.ID) {},
|
||||
params)
|
||||
peer.SetLogger(log.TestingLogger())
|
||||
|
||||
assert.Nil(t, peer.blockResponseTimer)
|
||||
peer.RequestSent(1)
|
||||
assert.NotNil(t, peer.blockResponseTimer)
|
||||
|
||||
peer.Cleanup()
|
||||
checkByStoppingPeerTimer(t, peer, false)
|
||||
}
|
||||
|
||||
// Check if peer timer is running or not (a running timer can be successfully stopped).
|
||||
// Note: stops the timer.
|
||||
func checkByStoppingPeerTimer(t *testing.T, peer *BpPeer, running bool) {
|
||||
assert.NotPanics(t, func() {
|
||||
stopped := peer.stopBlockResponseTimer()
|
||||
if running {
|
||||
assert.True(t, stopped)
|
||||
} else {
|
||||
assert.False(t, stopped)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func makeSmallBlock(height int) *types.Block {
|
||||
return types.MakeBlock(int64(height), []types.Tx{types.Tx("foo")}, nil, nil)
|
||||
}
|
||||
@@ -1,370 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// BlockPool keeps track of the fast sync peers, block requests and block responses.
|
||||
type BlockPool struct {
|
||||
logger log.Logger
|
||||
// Set of peers that have sent status responses, with height bigger than pool.Height
|
||||
peers map[p2p.ID]*BpPeer
|
||||
// Set of block heights and the corresponding peers from where a block response is expected or has been received.
|
||||
blocks map[int64]p2p.ID
|
||||
|
||||
plannedRequests map[int64]struct{} // list of blocks to be assigned peers for blockRequest
|
||||
nextRequestHeight int64 // next height to be added to plannedRequests
|
||||
|
||||
Height int64 // height of next block to execute
|
||||
MaxPeerHeight int64 // maximum height of all peers
|
||||
toBcR bcReactor
|
||||
}
|
||||
|
||||
// NewBlockPool creates a new BlockPool.
|
||||
func NewBlockPool(height int64, toBcR bcReactor) *BlockPool {
|
||||
return &BlockPool{
|
||||
Height: height,
|
||||
MaxPeerHeight: 0,
|
||||
peers: make(map[p2p.ID]*BpPeer),
|
||||
blocks: make(map[int64]p2p.ID),
|
||||
plannedRequests: make(map[int64]struct{}),
|
||||
nextRequestHeight: height,
|
||||
toBcR: toBcR,
|
||||
}
|
||||
}
|
||||
|
||||
// SetLogger sets the logger of the pool.
|
||||
func (pool *BlockPool) SetLogger(l log.Logger) {
|
||||
pool.logger = l
|
||||
}
|
||||
|
||||
// ReachedMaxHeight checks if the pool has reached the maximum peer height.
|
||||
func (pool *BlockPool) ReachedMaxHeight() bool {
|
||||
return pool.Height >= pool.MaxPeerHeight
|
||||
}
|
||||
|
||||
func (pool *BlockPool) rescheduleRequest(peerID p2p.ID, height int64) {
|
||||
pool.logger.Info("reschedule requests made to peer for height ", "peerID", peerID, "height", height)
|
||||
pool.plannedRequests[height] = struct{}{}
|
||||
delete(pool.blocks, height)
|
||||
pool.peers[peerID].RemoveBlock(height)
|
||||
}
|
||||
|
||||
// Updates the pool's max height. If no peers are left, MaxPeerHeight is set to 0.
|
||||
func (pool *BlockPool) updateMaxPeerHeight() {
|
||||
var newMax int64
|
||||
for _, peer := range pool.peers {
|
||||
peerHeight := peer.Height
|
||||
if peerHeight > newMax {
|
||||
newMax = peerHeight
|
||||
}
|
||||
}
|
||||
pool.MaxPeerHeight = newMax
|
||||
}
|
||||
|
||||
// UpdatePeer adds a new peer or updates an existing peer with a new base and height.
|
||||
// If a peer is short it is not added.
|
||||
func (pool *BlockPool) UpdatePeer(peerID p2p.ID, base int64, height int64) error {
|
||||
|
||||
peer := pool.peers[peerID]
|
||||
|
||||
if peer == nil {
|
||||
if height < pool.Height {
|
||||
pool.logger.Info("Peer height too small",
|
||||
"peer", peerID, "height", height, "fsm_height", pool.Height)
|
||||
return errPeerTooShort
|
||||
}
|
||||
// Add new peer.
|
||||
peer = NewBpPeer(peerID, base, height, pool.toBcR.sendPeerError, nil)
|
||||
peer.SetLogger(pool.logger.With("peer", peerID))
|
||||
pool.peers[peerID] = peer
|
||||
pool.logger.Info("added peer", "peerID", peerID, "base", base, "height", height, "num_peers", len(pool.peers))
|
||||
} else {
|
||||
// Check if peer is lowering its height. This is not allowed.
|
||||
if height < peer.Height {
|
||||
pool.RemovePeer(peerID, errPeerLowersItsHeight)
|
||||
return errPeerLowersItsHeight
|
||||
}
|
||||
// Update existing peer.
|
||||
peer.Base = base
|
||||
peer.Height = height
|
||||
}
|
||||
|
||||
// Update the pool's MaxPeerHeight if needed.
|
||||
pool.updateMaxPeerHeight()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cleans and deletes the peer. Recomputes the max peer height.
|
||||
func (pool *BlockPool) deletePeer(peer *BpPeer) {
|
||||
if peer == nil {
|
||||
return
|
||||
}
|
||||
peer.Cleanup()
|
||||
delete(pool.peers, peer.ID)
|
||||
|
||||
if peer.Height == pool.MaxPeerHeight {
|
||||
pool.updateMaxPeerHeight()
|
||||
}
|
||||
}
|
||||
|
||||
// RemovePeer removes the blocks and requests from the peer, reschedules them and deletes the peer.
|
||||
func (pool *BlockPool) RemovePeer(peerID p2p.ID, err error) {
|
||||
peer := pool.peers[peerID]
|
||||
if peer == nil {
|
||||
return
|
||||
}
|
||||
pool.logger.Info("removing peer", "peerID", peerID, "error", err)
|
||||
|
||||
// Reschedule the block requests made to this peer, including blocks that were received but not yet processed.
|
||||
// Note that some of the requests may be removed further down.
|
||||
for h := range pool.peers[peerID].blocks {
|
||||
pool.rescheduleRequest(peerID, h)
|
||||
}
|
||||
|
||||
oldMaxPeerHeight := pool.MaxPeerHeight
|
||||
// Delete the peer. This operation may result in the pool's MaxPeerHeight being lowered.
|
||||
pool.deletePeer(peer)
|
||||
|
||||
// Check if the pool's MaxPeerHeight has been lowered.
|
||||
// This may happen if the tallest peer has been removed.
|
||||
if oldMaxPeerHeight > pool.MaxPeerHeight {
|
||||
// Remove any planned requests for heights over the new MaxPeerHeight.
|
||||
for h := range pool.plannedRequests {
|
||||
if h > pool.MaxPeerHeight {
|
||||
delete(pool.plannedRequests, h)
|
||||
}
|
||||
}
|
||||
// Adjust the nextRequestHeight to the new max plus one.
|
||||
if pool.nextRequestHeight > pool.MaxPeerHeight {
|
||||
pool.nextRequestHeight = pool.MaxPeerHeight + 1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pool *BlockPool) removeShortPeers() {
|
||||
for _, peer := range pool.peers {
|
||||
if peer.Height < pool.Height {
|
||||
pool.RemovePeer(peer.ID, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pool *BlockPool) removeBadPeers() {
|
||||
pool.removeShortPeers()
|
||||
for _, peer := range pool.peers {
|
||||
if err := peer.CheckRate(); err != nil {
|
||||
pool.RemovePeer(peer.ID, err)
|
||||
pool.toBcR.sendPeerError(err, peer.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// MakeNextRequests creates more requests if the block pool is running low.
|
||||
func (pool *BlockPool) MakeNextRequests(maxNumRequests int) {
|
||||
heights := pool.makeRequestBatch(maxNumRequests)
|
||||
if len(heights) != 0 {
|
||||
pool.logger.Info("makeNextRequests will make following requests",
|
||||
"number", len(heights), "heights", heights)
|
||||
}
|
||||
|
||||
for _, height := range heights {
|
||||
h := int64(height)
|
||||
if !pool.sendRequest(h) {
|
||||
// If a good peer was not found for sending the request at height h then return,
|
||||
// as it shouldn't be possible to find a peer for h+1.
|
||||
return
|
||||
}
|
||||
delete(pool.plannedRequests, h)
|
||||
}
|
||||
}
|
||||
|
||||
// Makes a batch of requests sorted by height such that the block pool has up to maxNumRequests entries.
|
||||
func (pool *BlockPool) makeRequestBatch(maxNumRequests int) []int {
|
||||
pool.removeBadPeers()
|
||||
// At this point pool.requests may include heights for requests to be redone due to removal of peers:
|
||||
// - peers timed out or were removed by switch
|
||||
// - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1
|
||||
// Determine the number of requests needed by subtracting the number of requests already made from the maximum
|
||||
// allowed
|
||||
numNeeded := maxNumRequests - len(pool.blocks)
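// e.g. if maxNumRequests were 64 and 10 blocks were already requested,
// up to 54 new heights would be planned (a hypothetical illustration)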
|
||||
for len(pool.plannedRequests) < numNeeded {
|
||||
if pool.nextRequestHeight > pool.MaxPeerHeight {
|
||||
break
|
||||
}
|
||||
pool.plannedRequests[pool.nextRequestHeight] = struct{}{}
|
||||
pool.nextRequestHeight++
|
||||
}
|
||||
|
||||
heights := make([]int, 0, len(pool.plannedRequests))
|
||||
for k := range pool.plannedRequests {
|
||||
heights = append(heights, int(k))
|
||||
}
|
||||
sort.Ints(heights)
|
||||
return heights
|
||||
}
|
||||
|
||||
func (pool *BlockPool) sendRequest(height int64) bool {
|
||||
for _, peer := range pool.peers {
|
||||
if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
|
||||
continue
|
||||
}
|
||||
if peer.Base > height || peer.Height < height {
|
||||
continue
|
||||
}
|
||||
|
||||
err := pool.toBcR.sendBlockRequest(peer.ID, height)
|
||||
if err == errNilPeerForBlockRequest {
|
||||
// Switch does not have this peer, remove it and continue to look for another peer.
|
||||
pool.logger.Error("switch does not have peer..removing peer selected for height", "peer",
|
||||
peer.ID, "height", height)
|
||||
pool.RemovePeer(peer.ID, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if err == errSendQueueFull {
|
||||
pool.logger.Error("peer queue is full", "peer", peer.ID, "height", height)
|
||||
continue
|
||||
}
|
||||
|
||||
pool.logger.Info("assigned request to peer", "peer", peer.ID, "height", height)
|
||||
|
||||
pool.blocks[height] = peer.ID
|
||||
peer.RequestSent(height)
|
||||
|
||||
return true
|
||||
}
|
||||
pool.logger.Error("could not find peer to send request for block at height", "height", height)
|
||||
return false
|
||||
}
|
||||
|
||||
// AddBlock validates that the block comes from the peer it was expected from and stores it in the 'blocks' map.
|
||||
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) error {
|
||||
peer, ok := pool.peers[peerID]
|
||||
if !ok {
|
||||
pool.logger.Error("block from unknown peer", "height", block.Height, "peer", peerID)
|
||||
return errBadDataFromPeer
|
||||
}
|
||||
if wantPeerID, ok := pool.blocks[block.Height]; ok && wantPeerID != peerID {
|
||||
pool.logger.Error("block received from wrong peer", "height", block.Height,
|
||||
"peer", peerID, "expected_peer", wantPeerID)
|
||||
return errBadDataFromPeer
|
||||
}
|
||||
|
||||
return peer.AddBlock(block, blockSize)
|
||||
}
|
||||
|
||||
// BlockData stores the peer responsible to deliver a block and the actual block if delivered.
|
||||
type BlockData struct {
|
||||
block *types.Block
|
||||
peer *BpPeer
|
||||
}
|
||||
|
||||
// BlockAndPeerAtHeight retrieves the block and delivery peer at specified height.
|
||||
// Returns errMissingBlock if a block was not found
|
||||
func (pool *BlockPool) BlockAndPeerAtHeight(height int64) (bData *BlockData, err error) {
|
||||
peerID := pool.blocks[height]
|
||||
peer := pool.peers[peerID]
|
||||
if peer == nil {
|
||||
return nil, errMissingBlock
|
||||
}
|
||||
|
||||
block, err := peer.BlockAtHeight(height)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &BlockData{peer: peer, block: block}, nil
|
||||
|
||||
}
|
||||
|
||||
// FirstTwoBlocksAndPeers returns the blocks and the delivery peers at pool's height H and H+1.
|
||||
func (pool *BlockPool) FirstTwoBlocksAndPeers() (first, second *BlockData, err error) {
|
||||
first, err = pool.BlockAndPeerAtHeight(pool.Height)
|
||||
second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
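// prefer reporting the error for the first block; fall back to the error for the second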
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// InvalidateFirstTwoBlocks removes the peers that sent us the first two blocks; the blocks themselves are removed by RemovePeer().
|
||||
func (pool *BlockPool) InvalidateFirstTwoBlocks(err error) {
|
||||
first, err1 := pool.BlockAndPeerAtHeight(pool.Height)
|
||||
second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
|
||||
|
||||
if err1 == nil {
|
||||
pool.RemovePeer(first.peer.ID, err)
|
||||
}
|
||||
if err2 == nil {
|
||||
pool.RemovePeer(second.peer.ID, err)
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessedCurrentHeightBlock performs cleanup after a block is processed. It removes block at pool height and
|
||||
// the peers that are now short.
|
||||
func (pool *BlockPool) ProcessedCurrentHeightBlock() {
|
||||
peerID, peerOk := pool.blocks[pool.Height]
|
||||
if peerOk {
|
||||
pool.peers[peerID].RemoveBlock(pool.Height)
|
||||
}
|
||||
delete(pool.blocks, pool.Height)
|
||||
pool.logger.Debug("removed block at height", "height", pool.Height)
|
||||
pool.Height++
|
||||
pool.removeShortPeers()
|
||||
}
|
||||
|
||||
// RemovePeerAtCurrentHeights checks if a block at pool's height H exists and if not, it removes the
|
||||
// delivery peer and returns. If a block at height H exists then the check and peer removal is done for H+1.
|
||||
// This function is called when the FSM is not able to make progress for some time.
|
||||
// This happens if either the block H or H+1 have not been delivered.
|
||||
func (pool *BlockPool) RemovePeerAtCurrentHeights(err error) {
|
||||
peerID := pool.blocks[pool.Height]
|
||||
peer, ok := pool.peers[peerID]
|
||||
if ok {
|
||||
if _, err := peer.BlockAtHeight(pool.Height); err != nil {
|
||||
pool.logger.Info("remove peer that hasn't sent block at pool.Height",
|
||||
"peer", peerID, "height", pool.Height)
|
||||
pool.RemovePeer(peerID, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
peerID = pool.blocks[pool.Height+1]
|
||||
peer, ok = pool.peers[peerID]
|
||||
if ok {
|
||||
if _, err := peer.BlockAtHeight(pool.Height + 1); err != nil {
|
||||
pool.logger.Info("remove peer that hasn't sent block at pool.Height+1",
|
||||
"peer", peerID, "height", pool.Height+1)
|
||||
pool.RemovePeer(peerID, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup performs pool and peer cleanup
|
||||
func (pool *BlockPool) Cleanup() {
|
||||
for id, peer := range pool.peers {
|
||||
peer.Cleanup()
|
||||
delete(pool.peers, id)
|
||||
}
|
||||
pool.plannedRequests = make(map[int64]struct{})
|
||||
pool.blocks = make(map[int64]p2p.ID)
|
||||
pool.nextRequestHeight = 0
|
||||
pool.Height = 0
|
||||
pool.MaxPeerHeight = 0
|
||||
}
|
||||
|
||||
// NumPeers returns the number of peers in the pool
|
||||
func (pool *BlockPool) NumPeers() int {
|
||||
return len(pool.peers)
|
||||
}
|
||||
|
||||
// NeedsBlocks returns true if more blocks are required.
|
||||
func (pool *BlockPool) NeedsBlocks() bool {
|
||||
return len(pool.blocks) < maxNumRequests
|
||||
}
|
||||
@@ -1,691 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type testPeer struct {
|
||||
id p2p.ID
|
||||
base int64
|
||||
height int64
|
||||
}
|
||||
|
||||
type testBcR struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
type testValues struct {
|
||||
numRequestsSent int
|
||||
}
|
||||
|
||||
var testResults testValues
|
||||
|
||||
func resetPoolTestResults() {
|
||||
testResults.numRequestsSent = 0
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendPeerError(err error, peerID p2p.ID) {
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendStatusRequest() {
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
testResults.numRequestsSent++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (testR *testBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
|
||||
}
|
||||
|
||||
func (testR *testBcR) switchToConsensus() {
|
||||
|
||||
}
|
||||
|
||||
func newTestBcR() *testBcR {
|
||||
testBcR := &testBcR{logger: log.TestingLogger()}
|
||||
return testBcR
|
||||
}
|
||||
|
||||
type tPBlocks struct {
|
||||
id p2p.ID
|
||||
create bool
|
||||
}
|
||||
|
||||
// Makes a block pool with specified current height, list of peers, block requests and block responses
|
||||
func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64]tPBlocks) *BlockPool {
|
||||
bPool := NewBlockPool(height, bcr)
|
||||
bPool.SetLogger(bcr.logger)
|
||||
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
var maxH int64
|
||||
for _, p := range peers {
|
||||
if p.Height > maxH {
|
||||
maxH = p.Height
|
||||
}
|
||||
bPool.peers[p.ID] = NewBpPeer(p.ID, p.Base, p.Height, bcr.sendPeerError, nil)
|
||||
bPool.peers[p.ID].SetLogger(bcr.logger)
|
||||
|
||||
}
|
||||
bPool.MaxPeerHeight = maxH
|
||||
for h, p := range blocks {
|
||||
bPool.blocks[h] = p.id
|
||||
bPool.peers[p.id].RequestSent(h)
|
||||
if p.create {
|
||||
// simulate that a block at height h has been received
|
||||
_ = bPool.peers[p.id].AddBlock(types.MakeBlock(h, txs, nil, nil), 100)
|
||||
}
|
||||
}
|
||||
return bPool
|
||||
}
|
||||
|
||||
func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2p.ID]*BpPeer) {
|
||||
assert.Equal(t, len(set1), len(set2))
|
||||
for peerID, peer1 := range set1 {
|
||||
peer2 := set2[peerID]
|
||||
assert.NotNil(t, peer2)
|
||||
assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests)
|
||||
assert.Equal(t, peer1.Height, peer2.Height)
|
||||
assert.Equal(t, peer1.Base, peer2.Base)
|
||||
assert.Equal(t, len(peer1.blocks), len(peer2.blocks))
|
||||
for h, block1 := range peer1.blocks {
|
||||
block2 := peer2.blocks[h]
|
||||
// block1 and block2 could be nil if a request was made but no block was received
|
||||
assert.Equal(t, block1, block2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assertBlockPoolEquivalent(t *testing.T, poolWanted, pool *BlockPool) {
|
||||
assert.Equal(t, poolWanted.blocks, pool.blocks)
|
||||
assertPeerSetsEquivalent(t, poolWanted.peers, pool.peers)
|
||||
assert.Equal(t, poolWanted.MaxPeerHeight, pool.MaxPeerHeight)
|
||||
assert.Equal(t, poolWanted.Height, pool.Height)
|
||||
|
||||
}
|
||||
|
||||
func TestBlockPoolUpdatePeer(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args testPeer
|
||||
poolWanted *BlockPool
|
||||
errWanted error
|
||||
}{
|
||||
{
|
||||
name: "add a first short peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 0, 50},
|
||||
errWanted: errPeerTooShort,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "add a first good peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 0, 101},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "add a first good peer with base",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 10, 101},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Base: 10, Height: 101}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "increase the height of P1 from 120 to 123",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 0, 123},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "decrease the height of P1 from 120 to 110",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 0, 110},
|
||||
errWanted: errPeerLowersItsHeight,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "decrease the height of P1 from 105 to 102 with blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}),
|
||||
args: testPeer{"P1", 0, 102},
|
||||
errWanted: errPeerLowersItsHeight,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{},
|
||||
map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
err := pool.UpdatePeer(tt.args.id, tt.args.base, tt.args.height)
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks)
|
||||
assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers)
|
||||
assert.Equal(t, tt.poolWanted.MaxPeerHeight, tt.pool.MaxPeerHeight)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemovePeer(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args args
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "attempt to delete non-existing peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P99", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer without blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers without blocks",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
|
||||
map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers without blocks",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}},
|
||||
map[int64]tPBlocks{}),
|
||||
args: args{"P2", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 200}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 200}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 110}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 110}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.RemovePeer(tt.args.peerID, tt.args.err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemoveShortPeers(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "no short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "one short peer",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 90}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "all short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 90}, {ID: "P2", Height: 91}, {ID: "P3", Height: 92}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
pool.removeShortPeers()
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolSendRequestBatch(t *testing.T) {
|
||||
type testPeerResult struct {
|
||||
id p2p.ID
|
||||
numPendingBlockRequests int
|
||||
}
|
||||
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
maxRequestsPerPeer int
|
||||
expRequests map[int64]bool
|
||||
expRequestsSent int
|
||||
expPeerResults []testPeerResult
|
||||
}{
|
||||
{
|
||||
name: "one peer - send up to maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expRequestsSent: 2,
|
||||
expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}},
|
||||
},
|
||||
{
|
||||
name: "multiple peers - stops at gap between height and base",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{
|
||||
{ID: "P1", Base: 1, Height: 12},
|
||||
{ID: "P2", Base: 15, Height: 100},
|
||||
}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 10,
|
||||
expRequests: map[int64]bool{10: true, 11: true, 12: true},
|
||||
expRequestsSent: 3,
|
||||
expPeerResults: []testPeerResult{
|
||||
{id: "P1", numPendingBlockRequests: 3},
|
||||
{id: "P2", numPendingBlockRequests: 0},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "n peers - send n*maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(
|
||||
testBcR,
|
||||
10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expRequestsSent: 4,
|
||||
expPeerResults: []testPeerResult{
|
||||
{id: "P1", numPendingBlockRequests: 2},
|
||||
{id: "P2", numPendingBlockRequests: 2}},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
resetPoolTestResults()
|
||||
|
||||
var pool = tt.pool
|
||||
maxRequestsPerPeer = tt.maxRequestsPerPeer
|
||||
pool.MakeNextRequests(10)
|
||||
|
||||
assert.Equal(t, tt.expRequestsSent, testResults.numRequestsSent)
|
||||
for _, tPeer := range tt.expPeerResults {
|
||||
var peer = pool.peers[tPeer.id]
|
||||
assert.NotNil(t, peer)
|
||||
assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolAddBlock(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
block *types.Block
|
||||
blockSize int
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args args
|
||||
poolWanted *BlockPool
|
||||
errWanted error
|
||||
}{
|
||||
{name: "block from unknown peer",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
args: args{
|
||||
peerID: "P2",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
errWanted: errBadDataFromPeer,
|
||||
},
|
||||
{name: "unexpected block 11 from known peer - waiting for 10",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(11), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{name: "unexpected block 10 from known peer - already have 10",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
|
||||
errWanted: errDuplicateBlock,
|
||||
},
|
||||
{name: "unexpected block 10 from known peer P2 - expected 10 to come from P1",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P2",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
errWanted: errBadDataFromPeer,
|
||||
},
|
||||
{name: "expected block from known peer",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}}),
|
||||
errWanted: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize)
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
firstWanted int64
|
||||
secondWanted int64
|
||||
errWanted error
|
||||
}{
|
||||
{
|
||||
name: "both blocks missing",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{
|
||||
name: "second block missing",
|
||||
pool: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
|
||||
firstWanted: 15,
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{
|
||||
name: "first block missing",
|
||||
pool: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{16: {"P2", true}, 18: {"P2", true}}),
|
||||
secondWanted: 16,
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{
|
||||
name: "both blocks present",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
|
||||
firstWanted: 10,
|
||||
secondWanted: 11,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers()
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
|
||||
if tt.firstWanted != 0 {
|
||||
peer := pool.blocks[tt.firstWanted]
|
||||
block := pool.peers[peer].blocks[tt.firstWanted]
|
||||
assert.Equal(t, block, gotFirst.block,
|
||||
"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
|
||||
tt.firstWanted, gotFirst.block.Height)
|
||||
}
|
||||
|
||||
if tt.secondWanted != 0 {
|
||||
peer := pool.blocks[tt.secondWanted]
|
||||
block := pool.peers[peer].blocks[tt.secondWanted]
|
||||
assert.Equal(t, block, gotSecond.block,
|
||||
"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
|
||||
tt.secondWanted, gotSecond.block.Height)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "both blocks missing",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
|
||||
},
|
||||
{
|
||||
name: "second block missing",
|
||||
pool: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{18: {"P2", true}}),
|
||||
},
|
||||
{
|
||||
name: "first block missing",
|
||||
pool: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{18: {"P1", true}, 16: {"P2", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 15,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{18: {"P1", true}}),
|
||||
},
|
||||
{
|
||||
name: "both blocks present",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{},
|
||||
map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessedCurrentHeightBlock(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "one peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 101, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{101: {"P1", true}}),
|
||||
},
|
||||
{
|
||||
name: "multiple peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
|
||||
101: {"P2", true}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 101,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
104: {"P1", true}, 105: {"P1", false},
|
||||
101: {"P2", true}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.ProcessedCurrentHeightBlock()
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestRemovePeerAtCurrentHeight(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "one peer, remove peer for block at H",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", false}, 101: {"P1", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "one peer, remove peer for block at H+1",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "multiple peers, remove peer for block at H",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", false}, 104: {"P1", true}, 105: {"P1", false},
|
||||
101: {"P2", true}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
101: {"P2", true}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
},
|
||||
{
|
||||
name: "multiple peers, remove peer for block at H+1",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
|
||||
101: {"P2", false}, 103: {"P2", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 120}, {ID: "P3", Height: 130}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
|
||||
102: {"P3", true}, 106: {"P3", true}}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,560 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
|
||||
BlockchainChannel = byte(0x40)
|
||||
trySyncIntervalMS = 10
|
||||
trySendIntervalMS = 10
|
||||
|
||||
// ask for best height every 10s
|
||||
statusUpdateIntervalSeconds = 10
|
||||
)
|
||||
|
||||
var (
|
||||
// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
|
||||
// have not been received.
|
||||
maxRequestsPerPeer = 20
|
||||
// Maximum number of block requests for the reactor, pending or for which blocks have been received.
|
||||
maxNumRequests = 64
|
||||
)
|
||||
|
||||
type consensusReactor interface {
|
||||
// for when we switch from blockchain reactor and fast sync to
|
||||
// the consensus machine
|
||||
SwitchToConsensus(state sm.State, skipWAL bool)
|
||||
}
|
||||
|
||||
// BlockchainReactor handles long-term catchup syncing.
|
||||
type BlockchainReactor struct {
|
||||
p2p.BaseReactor
|
||||
|
||||
initialState sm.State // immutable
|
||||
state sm.State
|
||||
|
||||
blockExec *sm.BlockExecutor
|
||||
store *store.BlockStore
|
||||
|
||||
fastSync bool
|
||||
stateSynced bool
|
||||
|
||||
fsm *BcReactorFSM
|
||||
blocksSynced uint64
|
||||
|
||||
// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
|
||||
messagesForFSMCh chan bcReactorMessage
|
||||
|
||||
// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
|
||||
// to this channel to be processed in the context of the poolRoutine.
|
||||
errorsForFSMCh chan bcReactorMessage
|
||||
|
||||
// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
|
||||
// the switch.
|
||||
eventsFromFSMCh chan bcFsmMessage
|
||||
|
||||
swReporter *behaviour.SwitchReporter
|
||||
}
|
||||
|
||||
// NewBlockchainReactor returns new reactor instance.
|
||||
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
|
||||
fastSync bool) *BlockchainReactor {
|
||||
|
||||
if state.LastBlockHeight != store.Height() {
|
||||
panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
|
||||
store.Height()))
|
||||
}
|
||||
|
||||
const capacity = 1000
|
||||
eventsFromFSMCh := make(chan bcFsmMessage, capacity)
|
||||
messagesForFSMCh := make(chan bcReactorMessage, capacity)
|
||||
errorsForFSMCh := make(chan bcReactorMessage, capacity)
|
||||
|
||||
startHeight := store.Height() + 1
|
||||
if startHeight == 1 {
|
||||
startHeight = state.InitialHeight
|
||||
}
|
||||
bcR := &BlockchainReactor{
|
||||
initialState: state,
|
||||
state: state,
|
||||
blockExec: blockExec,
|
||||
fastSync: fastSync,
|
||||
store: store,
|
||||
messagesForFSMCh: messagesForFSMCh,
|
||||
eventsFromFSMCh: eventsFromFSMCh,
|
||||
errorsForFSMCh: errorsForFSMCh,
|
||||
}
|
||||
fsm := NewFSM(startHeight, bcR)
|
||||
bcR.fsm = fsm
|
||||
bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
|
||||
//bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
|
||||
|
||||
return bcR
|
||||
}
|
||||
|
||||
// bcReactorMessage is used by the reactor to send messages to the FSM.
|
||||
type bcReactorMessage struct {
|
||||
event bReactorEvent
|
||||
data bReactorEventData
|
||||
}
|
||||
|
||||
type bFsmEvent uint
|
||||
|
||||
const (
|
||||
// message type events
|
||||
peerErrorEv = iota + 1
|
||||
syncFinishedEv
|
||||
)
|
||||
|
||||
type bFsmEventData struct {
|
||||
peerID p2p.ID
|
||||
err error
|
||||
}
|
||||
|
||||
// bcFsmMessage is used by the FSM to send messages to the reactor
|
||||
type bcFsmMessage struct {
|
||||
event bFsmEvent
|
||||
data bFsmEventData
|
||||
}
|
||||
|
||||
// SetLogger implements service.Service by setting the logger on reactor and pool.
|
||||
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
|
||||
bcR.BaseService.Logger = l
|
||||
bcR.fsm.SetLogger(l)
|
||||
}
|
||||
|
||||
// OnStart implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStart() error {
|
||||
bcR.swReporter = behaviour.NewSwitchReporter(bcR.BaseReactor.Switch)
|
||||
if bcR.fastSync {
|
||||
go bcR.poolRoutine()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// OnStop implements service.Service.
|
||||
func (bcR *BlockchainReactor) OnStop() {
|
||||
_ = bcR.Stop()
|
||||
}
|
||||
|
||||
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
|
||||
func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error {
|
||||
bcR.fastSync = true
|
||||
bcR.initialState = state
|
||||
bcR.state = state
|
||||
bcR.stateSynced = true
|
||||
|
||||
bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR)
|
||||
bcR.fsm.SetLogger(bcR.Logger)
|
||||
go bcR.poolRoutine()
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetChannels implements Reactor
|
||||
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: BlockchainChannel,
|
||||
Priority: 10,
|
||||
SendQueueCapacity: 2000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// AddPeer implements Reactor by sending our state to peer.
|
||||
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("could not convert msg to protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
peer.Send(BlockchainChannel, msgBytes)
|
||||
// it's OK if send fails. will try later in poolRoutine
|
||||
|
||||
// peer is added to the pool once we receive the first
|
||||
// bcStatusResponseMessage from the peer and call pool.updatePeer()
|
||||
}
|
||||
|
||||
// sendBlockToPeer loads a block and sends it to the requesting peer.
|
||||
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
|
||||
// If all nodes are honest, no node should be requesting for a block that doesn't exist.
|
||||
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcproto.BlockRequest,
|
||||
src p2p.Peer) (queued bool) {
|
||||
|
||||
block := bcR.store.LoadBlock(msg.Height)
|
||||
if block != nil {
|
||||
pbbi, err := block.ToProto()
|
||||
if err != nil {
|
||||
bcR.Logger.Error("Could not send block message to peer", "err", err)
|
||||
return false
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: pbbi})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: msg.Height})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcproto.StatusRequest, src p2p.Peer) (queued bool) {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{
|
||||
Base: bcR.store.Base(),
|
||||
Height: bcR.store.Height(),
|
||||
})
|
||||
if err != nil {
|
||||
bcR.Logger.Error("unable to marshal msg", "err", err)
|
||||
return false
|
||||
}
|
||||
|
||||
return src.TrySend(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
// RemovePeer implements Reactor by removing peer from the pool.
|
||||
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
msgData := bcReactorMessage{
|
||||
event: peerRemoveEv,
|
||||
data: bReactorEventData{
|
||||
peerID: peer.ID(),
|
||||
err: errSwitchRemovesPeer,
|
||||
},
|
||||
}
|
||||
bcR.errorsForFSMCh <- msgData
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling 4 types of messages (look below).
|
||||
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("error decoding message",
|
||||
"src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *bcproto.BlockRequest:
|
||||
if queued := bcR.sendBlockToPeer(msg, src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
|
||||
}
|
||||
|
||||
case *bcproto.StatusRequest:
|
||||
// Send peer our state.
|
||||
if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
|
||||
// Unfortunately not queued since the queue is full.
|
||||
bcR.Logger.Error("Could not send status message to peer", "src", src)
|
||||
}
|
||||
|
||||
case *bcproto.BlockResponse:
|
||||
bi, err := types.BlockFromProto(msg.Block)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("error transition block from protobuf", "err", err)
|
||||
return
|
||||
}
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: blockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: src.ID(),
|
||||
height: bi.Height,
|
||||
block: bi,
|
||||
length: len(msgBytes),
|
||||
},
|
||||
}
|
||||
bcR.Logger.Info("Received", "src", src, "height", bi.Height)
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
|
||||
case *bcproto.StatusResponse:
|
||||
// Got a peer status. Unverified.
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: statusResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: src.ID(),
|
||||
height: msg.Height,
|
||||
length: len(msgBytes),
|
||||
},
|
||||
}
|
||||
bcR.messagesForFSMCh <- msgForFSM
|
||||
|
||||
default:
|
||||
bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
|
||||
}
|
||||
}
|
||||
|
||||
// processBlocksRoutine processes blocks until signlaed to stop over the stopProcessing channel
|
||||
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {
|
||||
|
||||
processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
|
||||
doProcessBlockCh := make(chan struct{}, 1)
|
||||
|
||||
lastHundred := time.Now()
|
||||
lastRate := 0.0
|
||||
|
||||
ForLoop:
|
||||
for {
|
||||
select {
|
||||
case <-stopProcessing:
|
||||
bcR.Logger.Info("finishing block execution")
|
||||
break ForLoop
|
||||
case <-processReceivedBlockTicker.C: // try to execute blocks
|
||||
select {
|
||||
case doProcessBlockCh <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
case <-doProcessBlockCh:
|
||||
for {
|
||||
err := bcR.processBlock()
|
||||
if err == errMissingBlock {
|
||||
break
|
||||
}
|
||||
// Notify FSM of block processing result.
|
||||
msgForFSM := bcReactorMessage{
|
||||
event: processedBlockEv,
|
||||
data: bReactorEventData{
|
||||
err: err,
|
||||
},
|
||||
}
|
||||
_ = bcR.fsm.Handle(&msgForFSM)
|
||||
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
|
||||
bcR.blocksSynced++
|
||||
if bcR.blocksSynced%100 == 0 {
|
||||
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
|
||||
height, maxPeerHeight := bcR.fsm.Status()
|
||||
bcR.Logger.Info("Fast Sync Rate", "height", height,
|
||||
"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
|
||||
lastHundred = time.Now()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
|
||||
func (bcR *BlockchainReactor) poolRoutine() {
|
||||
|
||||
bcR.fsm.Start()
|
||||
|
||||
sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
|
||||
statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)
|
||||
|
||||
stopProcessing := make(chan struct{}, 1)
|
||||
go bcR.processBlocksRoutine(stopProcessing)
|
||||
|
||||
ForLoop:
|
||||
for {
|
||||
select {
|
||||
|
||||
case <-sendBlockRequestTicker.C:
|
||||
if !bcR.fsm.NeedsBlocks() {
|
||||
continue
|
||||
}
|
||||
_ = bcR.fsm.Handle(&bcReactorMessage{
|
||||
event: makeRequestsEv,
|
||||
data: bReactorEventData{
|
||||
maxNumRequests: maxNumRequests}})
|
||||
|
||||
case <-statusUpdateTicker.C:
|
||||
// Ask for status updates.
|
||||
go bcR.sendStatusRequest()
|
||||
|
||||
case msg := <-bcR.messagesForFSMCh:
|
||||
// Sent from the Receive() routine when status (statusResponseEv) and
|
||||
// block (blockResponseEv) response events are received
|
||||
_ = bcR.fsm.Handle(&msg)
|
||||
|
||||
case msg := <-bcR.errorsForFSMCh:
|
||||
// Sent from the switch.RemovePeer() routine (RemovePeerEv) and
|
||||
// FSM state timer expiry routine (stateTimeoutEv).
|
||||
_ = bcR.fsm.Handle(&msg)
|
||||
|
||||
case msg := <-bcR.eventsFromFSMCh:
|
||||
switch msg.event {
|
||||
case syncFinishedEv:
|
||||
stopProcessing <- struct{}{}
|
||||
// Sent from the FSM when it enters finished state.
|
||||
break ForLoop
|
||||
case peerErrorEv:
|
||||
// Sent from the FSM when it detects peer error
|
||||
bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
|
||||
if msg.data.err == errNoPeerResponse {
|
||||
// Sent from the peer timeout handler routine
|
||||
_ = bcR.fsm.Handle(&bcReactorMessage{
|
||||
event: peerRemoveEv,
|
||||
data: bReactorEventData{
|
||||
peerID: msg.data.peerID,
|
||||
err: msg.data.err,
|
||||
},
|
||||
})
|
||||
}
|
||||
// else {
|
||||
// For slow peers, or errors due to blocks received from wrong peer
|
||||
// the FSM had already removed the peers
|
||||
// }
|
||||
default:
|
||||
bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
|
||||
}
|
||||
|
||||
case <-bcR.Quit():
|
||||
break ForLoop
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
if peer != nil {
|
||||
_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
|
||||
}
|
||||
}
|
||||
|
||||
func (bcR *BlockchainReactor) processBlock() error {
|
||||
|
||||
first, second, err := bcR.fsm.FirstTwoBlocks()
|
||||
if err != nil {
|
||||
// We need both to sync the first block.
|
||||
return err
|
||||
}
|
||||
|
||||
chainID := bcR.initialState.ChainID
|
||||
|
||||
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstPartSetHeader := firstParts.Header()
|
||||
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
// Finally, verify the first block using the second's commit
|
||||
// NOTE: we can probably make this more efficient, but note that calling
|
||||
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
|
||||
// currently necessary.
|
||||
err = bcR.state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
|
||||
if err != nil {
|
||||
bcR.Logger.Error("error during commit verification", "err", err,
|
||||
"first", first.Height, "second", second.Height)
|
||||
return errBlockVerificationFailure
|
||||
}
|
||||
|
||||
bcR.store.SaveBlock(first, firstParts, second.LastCommit)
|
||||
|
||||
bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
// sendStatusRequest broadcasts `BlockStore` height.
|
||||
func (bcR *BlockchainReactor) sendStatusRequest() {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
// BlockRequest sends `BlockRequest` height.
|
||||
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
peer := bcR.Switch.Peers().Get(peerID)
|
||||
if peer == nil {
|
||||
return errNilPeerForBlockRequest
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
return errSendQueueFull
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
func (bcR *BlockchainReactor) switchToConsensus() {
|
||||
conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
|
||||
if ok {
|
||||
conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced)
|
||||
bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
|
||||
}
|
||||
// else {
|
||||
// Should only happen during testing.
|
||||
// }
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
// Called by FSM and pool:
|
||||
// - pool calls when it detects slow peer or when peer times out
|
||||
// - FSM calls when:
|
||||
// - adding a block (addBlock) fails
|
||||
// - reactor processing of a block reports failure and FSM sends back the peers of first and second blocks
|
||||
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
|
||||
bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
|
||||
msgData := bcFsmMessage{
|
||||
event: peerErrorEv,
|
||||
data: bFsmEventData{
|
||||
peerID: peerID,
|
||||
err: err,
|
||||
},
|
||||
}
|
||||
bcR.eventsFromFSMCh <- msgData
|
||||
}
|
||||
|
||||
// Implements bcRNotifier
|
||||
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
|
||||
if timer == nil {
|
||||
panic("nil timer pointer parameter")
|
||||
}
|
||||
if *timer == nil {
|
||||
*timer = time.AfterFunc(timeout, func() {
|
||||
msg := bcReactorMessage{
|
||||
event: stateTimeoutEv,
|
||||
data: bReactorEventData{
|
||||
stateName: name,
|
||||
},
|
||||
}
|
||||
bcR.errorsForFSMCh <- msg
|
||||
})
|
||||
} else {
|
||||
(*timer).Reset(timeout)
|
||||
}
|
||||
}
|
||||
@@ -1,453 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Blockchain Reactor State
|
||||
type bcReactorFSMState struct {
|
||||
name string
|
||||
|
||||
// called when transitioning out of current state
|
||||
handle func(*BcReactorFSM, bReactorEvent, bReactorEventData) (next *bcReactorFSMState, err error)
|
||||
// called when entering the state
|
||||
enter func(fsm *BcReactorFSM)
|
||||
|
||||
// timeout to ensure FSM is not stuck in a state forever
|
||||
// the timer is owned and run by the fsm instance
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
func (s *bcReactorFSMState) String() string {
|
||||
return s.name
|
||||
}
|
||||
|
||||
// BcReactorFSM is the datastructure for the Blockchain Reactor State Machine
|
||||
type BcReactorFSM struct {
|
||||
logger log.Logger
|
||||
mtx sync.Mutex
|
||||
|
||||
startTime time.Time
|
||||
|
||||
state *bcReactorFSMState
|
||||
stateTimer *time.Timer
|
||||
pool *BlockPool
|
||||
|
||||
// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, reporting errors, etc.
|
||||
toBcR bcReactor
|
||||
}
|
||||
|
||||
// NewFSM creates a new reactor FSM.
|
||||
func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM {
|
||||
return &BcReactorFSM{
|
||||
state: unknown,
|
||||
startTime: time.Now(),
|
||||
pool: NewBlockPool(height, toBcR),
|
||||
toBcR: toBcR,
|
||||
}
|
||||
}
|
||||
|
||||
// bReactorEventData is part of the message sent by the reactor to the FSM and used by the state handlers.
|
||||
type bReactorEventData struct {
|
||||
peerID p2p.ID
|
||||
err error // for peer error: timeout, slow; for processed block event if error occurred
|
||||
base int64 // for status response
|
||||
height int64 // for status response; for processed block event
|
||||
block *types.Block // for block response
|
||||
stateName string // for state timeout events
|
||||
length int // for block response event, length of received block, used to detect slow peers
|
||||
maxNumRequests int // for request needed event, maximum number of pending requests
|
||||
}
|
||||
|
||||
// Blockchain Reactor Events (the input to the state machine)
|
||||
type bReactorEvent uint
|
||||
|
||||
const (
|
||||
// message type events
|
||||
startFSMEv = iota + 1
|
||||
statusResponseEv
|
||||
blockResponseEv
|
||||
processedBlockEv
|
||||
makeRequestsEv
|
||||
stopFSMEv
|
||||
|
||||
// other events
|
||||
peerRemoveEv = iota + 256
|
||||
stateTimeoutEv
|
||||
)
|
||||
|
||||
func (msg *bcReactorMessage) String() string {
|
||||
var dataStr string
|
||||
|
||||
switch msg.event {
|
||||
case startFSMEv:
|
||||
dataStr = ""
|
||||
case statusResponseEv:
|
||||
dataStr = fmt.Sprintf("peer=%v base=%v height=%v", msg.data.peerID, msg.data.base, msg.data.height)
|
||||
case blockResponseEv:
|
||||
dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v",
|
||||
msg.data.peerID, msg.data.block.Height, msg.data.length)
|
||||
case processedBlockEv:
|
||||
dataStr = fmt.Sprintf("error=%v", msg.data.err)
|
||||
case makeRequestsEv:
|
||||
dataStr = ""
|
||||
case stopFSMEv:
|
||||
dataStr = ""
|
||||
case peerRemoveEv:
|
||||
dataStr = fmt.Sprintf("peer: %v is being removed by the switch", msg.data.peerID)
|
||||
case stateTimeoutEv:
|
||||
dataStr = fmt.Sprintf("state=%v", msg.data.stateName)
|
||||
default:
|
||||
dataStr = "cannot interpret message data"
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v: %v", msg.event, dataStr)
|
||||
}
|
||||
|
||||
func (ev bReactorEvent) String() string {
|
||||
switch ev {
|
||||
case startFSMEv:
|
||||
return "startFSMEv"
|
||||
case statusResponseEv:
|
||||
return "statusResponseEv"
|
||||
case blockResponseEv:
|
||||
return "blockResponseEv"
|
||||
case processedBlockEv:
|
||||
return "processedBlockEv"
|
||||
case makeRequestsEv:
|
||||
return "makeRequestsEv"
|
||||
case stopFSMEv:
|
||||
return "stopFSMEv"
|
||||
case peerRemoveEv:
|
||||
return "peerRemoveEv"
|
||||
case stateTimeoutEv:
|
||||
return "stateTimeoutEv"
|
||||
default:
|
||||
return "event unknown"
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// states
|
||||
var (
|
||||
unknown *bcReactorFSMState
|
||||
waitForPeer *bcReactorFSMState
|
||||
waitForBlock *bcReactorFSMState
|
||||
finished *bcReactorFSMState
|
||||
)
|
||||
|
||||
// timeouts for state timers
|
||||
const (
|
||||
waitForPeerTimeout = 3 * time.Second
|
||||
waitForBlockAtCurrentHeightTimeout = 10 * time.Second
|
||||
)
|
||||
|
||||
// errors
|
||||
var (
|
||||
// internal to the package
|
||||
errNoErrorFinished = errors.New("fast sync is finished")
|
||||
errInvalidEvent = errors.New("invalid event in current state")
|
||||
errMissingBlock = errors.New("missing blocks")
|
||||
errNilPeerForBlockRequest = errors.New("peer for block request does not exist in the switch")
|
||||
errSendQueueFull = errors.New("block request not made, send-queue is full")
|
||||
errPeerTooShort = errors.New("peer height too low, old peer removed/ new peer not added")
|
||||
errSwitchRemovesPeer = errors.New("switch is removing peer")
|
||||
errTimeoutEventWrongState = errors.New("timeout event for a state different than the current one")
|
||||
errNoTallerPeer = errors.New("fast sync timed out on waiting for a peer taller than this node")
|
||||
|
||||
// reported eventually to the switch
|
||||
// handle return
|
||||
errPeerLowersItsHeight = errors.New("fast sync peer reports a height lower than previous")
|
||||
// handle return
|
||||
errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights")
|
||||
errNoPeerResponse = errors.New("fast sync timed out on peer block response") // xx
|
||||
errBadDataFromPeer = errors.New("fast sync received block from wrong peer or block is bad") // xx
|
||||
errDuplicateBlock = errors.New("fast sync received duplicate block from peer")
|
||||
errBlockVerificationFailure = errors.New("fast sync block verification failure") // xx
|
||||
errSlowPeer = errors.New("fast sync peer is not sending us data fast enough") // xx
|
||||
|
||||
)
|
||||
|
||||
func init() {
|
||||
unknown = &bcReactorFSMState{
|
||||
name: "unknown",
|
||||
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
|
||||
switch ev {
|
||||
case startFSMEv:
|
||||
// Broadcast Status message. Currently doesn't return non-nil error.
|
||||
fsm.toBcR.sendStatusRequest()
|
||||
return waitForPeer, nil
|
||||
|
||||
case stopFSMEv:
|
||||
return finished, errNoErrorFinished
|
||||
|
||||
default:
|
||||
return unknown, errInvalidEvent
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
waitForPeer = &bcReactorFSMState{
|
||||
name: "waitForPeer",
|
||||
timeout: waitForPeerTimeout,
|
||||
enter: func(fsm *BcReactorFSM) {
|
||||
// Stop when leaving the state.
|
||||
fsm.resetStateTimer()
|
||||
},
|
||||
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
|
||||
switch ev {
|
||||
case stateTimeoutEv:
|
||||
if data.stateName != "waitForPeer" {
|
||||
fsm.logger.Error("received a state timeout event for different state",
|
||||
"state", data.stateName)
|
||||
return waitForPeer, errTimeoutEventWrongState
|
||||
}
|
||||
// There was no statusResponse received from any peer.
|
||||
// Should we send status request again?
|
||||
return finished, errNoTallerPeer
|
||||
|
||||
case statusResponseEv:
|
||||
if err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height); err != nil {
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, err
|
||||
}
|
||||
}
|
||||
if fsm.stateTimer != nil {
|
||||
fsm.stateTimer.Stop()
|
||||
}
|
||||
return waitForBlock, nil
|
||||
|
||||
case stopFSMEv:
|
||||
if fsm.stateTimer != nil {
|
||||
fsm.stateTimer.Stop()
|
||||
}
|
||||
return finished, errNoErrorFinished
|
||||
|
||||
default:
|
||||
return waitForPeer, errInvalidEvent
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
waitForBlock = &bcReactorFSMState{
|
||||
name: "waitForBlock",
|
||||
timeout: waitForBlockAtCurrentHeightTimeout,
|
||||
enter: func(fsm *BcReactorFSM) {
|
||||
// Stop when leaving the state.
|
||||
fsm.resetStateTimer()
|
||||
},
|
||||
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
|
||||
switch ev {
|
||||
|
||||
case statusResponseEv:
|
||||
err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height)
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, err
|
||||
}
|
||||
if fsm.pool.ReachedMaxHeight() {
|
||||
return finished, err
|
||||
}
|
||||
return waitForBlock, err
|
||||
|
||||
case blockResponseEv:
|
||||
fsm.logger.Debug("blockResponseEv", "H", data.block.Height)
|
||||
err := fsm.pool.AddBlock(data.peerID, data.block, data.length)
|
||||
if err != nil {
|
||||
// A block was received that was unsolicited, from unexpected peer, or that we already have it.
|
||||
// Ignore block, remove peer and send error to switch.
|
||||
fsm.pool.RemovePeer(data.peerID, err)
|
||||
fsm.toBcR.sendPeerError(err, data.peerID)
|
||||
}
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, err
|
||||
}
|
||||
return waitForBlock, err
|
||||
|
||||
case processedBlockEv:
|
||||
if data.err != nil {
|
||||
first, second, _ := fsm.pool.FirstTwoBlocksAndPeers()
|
||||
fsm.logger.Error("error processing block", "err", data.err,
|
||||
"first", first.block.Height, "second", second.block.Height)
|
||||
fsm.logger.Error("send peer error for", "peer", first.peer.ID)
|
||||
fsm.toBcR.sendPeerError(data.err, first.peer.ID)
|
||||
fsm.logger.Error("send peer error for", "peer", second.peer.ID)
|
||||
fsm.toBcR.sendPeerError(data.err, second.peer.ID)
|
||||
// Remove the first two blocks. This will also remove the peers
|
||||
fsm.pool.InvalidateFirstTwoBlocks(data.err)
|
||||
} else {
|
||||
fsm.pool.ProcessedCurrentHeightBlock()
|
||||
// Since we advanced one block reset the state timer
|
||||
fsm.resetStateTimer()
|
||||
}
|
||||
|
||||
// Both cases above may result in achieving maximum height.
|
||||
if fsm.pool.ReachedMaxHeight() {
|
||||
return finished, nil
|
||||
}
|
||||
|
||||
return waitForBlock, data.err
|
||||
|
||||
case peerRemoveEv:
|
||||
// This event is sent by the switch to remove disconnected and errored peers.
|
||||
fsm.pool.RemovePeer(data.peerID, data.err)
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, nil
|
||||
}
|
||||
if fsm.pool.ReachedMaxHeight() {
|
||||
return finished, nil
|
||||
}
|
||||
return waitForBlock, nil
|
||||
|
||||
case makeRequestsEv:
|
||||
fsm.makeNextRequests(data.maxNumRequests)
|
||||
return waitForBlock, nil
|
||||
|
||||
case stateTimeoutEv:
|
||||
if data.stateName != "waitForBlock" {
|
||||
fsm.logger.Error("received a state timeout event for different state",
|
||||
"state", data.stateName)
|
||||
return waitForBlock, errTimeoutEventWrongState
|
||||
}
|
||||
// We haven't received the block at current height or height+1. Remove peer.
|
||||
fsm.pool.RemovePeerAtCurrentHeights(errNoPeerResponseForCurrentHeights)
|
||||
fsm.resetStateTimer()
|
||||
if fsm.pool.NumPeers() == 0 {
|
||||
return waitForPeer, errNoPeerResponseForCurrentHeights
|
||||
}
|
||||
if fsm.pool.ReachedMaxHeight() {
|
||||
return finished, nil
|
||||
}
|
||||
return waitForBlock, errNoPeerResponseForCurrentHeights
|
||||
|
||||
case stopFSMEv:
|
||||
if fsm.stateTimer != nil {
|
||||
fsm.stateTimer.Stop()
|
||||
}
|
||||
return finished, errNoErrorFinished
|
||||
|
||||
default:
|
||||
return waitForBlock, errInvalidEvent
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
finished = &bcReactorFSMState{
|
||||
name: "finished",
|
||||
enter: func(fsm *BcReactorFSM) {
|
||||
fsm.logger.Info("Time to switch to consensus reactor!", "height", fsm.pool.Height)
|
||||
fsm.toBcR.switchToConsensus()
|
||||
fsm.cleanup()
|
||||
},
|
||||
handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
|
||||
return finished, nil
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Interface used by FSM for sending Block and Status requests,
|
||||
// informing of peer errors and state timeouts
|
||||
// Implemented by BlockchainReactor and tests
|
||||
type bcReactor interface {
|
||||
sendStatusRequest()
|
||||
sendBlockRequest(peerID p2p.ID, height int64) error
|
||||
sendPeerError(err error, peerID p2p.ID)
|
||||
resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
|
||||
switchToConsensus()
|
||||
}
|
||||
|
||||
// SetLogger sets the FSM logger.
|
||||
func (fsm *BcReactorFSM) SetLogger(l log.Logger) {
|
||||
fsm.logger = l
|
||||
fsm.pool.SetLogger(l)
|
||||
}
|
||||
|
||||
// Start starts the FSM.
|
||||
func (fsm *BcReactorFSM) Start() {
|
||||
_ = fsm.Handle(&bcReactorMessage{event: startFSMEv})
|
||||
}
|
||||
|
||||
// Handle processes messages and events sent to the FSM.
|
||||
func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error {
|
||||
fsm.mtx.Lock()
|
||||
defer fsm.mtx.Unlock()
|
||||
fsm.logger.Debug("FSM received", "event", msg, "state", fsm.state)
|
||||
|
||||
if fsm.state == nil {
|
||||
fsm.state = unknown
|
||||
}
|
||||
next, err := fsm.state.handle(fsm, msg.event, msg.data)
|
||||
if err != nil {
|
||||
fsm.logger.Error("FSM event handler returned", "err", err,
|
||||
"state", fsm.state, "event", msg.event)
|
||||
}
|
||||
|
||||
oldState := fsm.state.name
|
||||
fsm.transition(next)
|
||||
if oldState != fsm.state.name {
|
||||
fsm.logger.Info("FSM changed state", "new_state", fsm.state)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (fsm *BcReactorFSM) transition(next *bcReactorFSMState) {
|
||||
if next == nil {
|
||||
return
|
||||
}
|
||||
if fsm.state != next {
|
||||
fsm.state = next
|
||||
if next.enter != nil {
|
||||
next.enter(fsm)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Called when entering an FSM state in order to detect lack of progress in the state machine.
|
||||
// Note the use of the 'bcr' interface to facilitate testing without timer expiring.
|
||||
func (fsm *BcReactorFSM) resetStateTimer() {
|
||||
fsm.toBcR.resetStateTimer(fsm.state.name, &fsm.stateTimer, fsm.state.timeout)
|
||||
}
|
||||
|
||||
func (fsm *BcReactorFSM) isCaughtUp() bool {
|
||||
return fsm.state == finished
|
||||
}
|
||||
|
||||
func (fsm *BcReactorFSM) makeNextRequests(maxNumRequests int) {
|
||||
fsm.pool.MakeNextRequests(maxNumRequests)
|
||||
}
|
||||
|
||||
func (fsm *BcReactorFSM) cleanup() {
|
||||
fsm.pool.Cleanup()
|
||||
}
|
||||
|
||||
// NeedsBlocks checks if more block requests are required.
|
||||
func (fsm *BcReactorFSM) NeedsBlocks() bool {
|
||||
fsm.mtx.Lock()
|
||||
defer fsm.mtx.Unlock()
|
||||
return fsm.state.name == "waitForBlock" && fsm.pool.NeedsBlocks()
|
||||
}
|
||||
|
||||
// FirstTwoBlocks returns the two blocks at pool height and height+1
|
||||
func (fsm *BcReactorFSM) FirstTwoBlocks() (first, second *types.Block, err error) {
|
||||
fsm.mtx.Lock()
|
||||
defer fsm.mtx.Unlock()
|
||||
firstBP, secondBP, err := fsm.pool.FirstTwoBlocksAndPeers()
|
||||
if err == nil {
|
||||
first = firstBP.block
|
||||
second = secondBP.block
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Status returns the pool's height and the maximum peer height.
|
||||
func (fsm *BcReactorFSM) Status() (height, maxPeerHeight int64) {
|
||||
fsm.mtx.Lock()
|
||||
defer fsm.mtx.Unlock()
|
||||
return fsm.pool.Height, fsm.pool.MaxPeerHeight
|
||||
}
|
||||
@@ -1,944 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmmath "github.com/tendermint/tendermint/libs/math"
|
||||
tmrand "github.com/tendermint/tendermint/libs/rand"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type lastBlockRequestT struct {
|
||||
peerID p2p.ID
|
||||
height int64
|
||||
}
|
||||
|
||||
type lastPeerErrorT struct {
|
||||
peerID p2p.ID
|
||||
err error
|
||||
}
|
||||
|
||||
// reactor for FSM testing
|
||||
type testReactor struct {
|
||||
logger log.Logger
|
||||
fsm *BcReactorFSM
|
||||
numStatusRequests int
|
||||
numBlockRequests int
|
||||
lastBlockRequest lastBlockRequestT
|
||||
lastPeerError lastPeerErrorT
|
||||
stateTimerStarts map[string]int
|
||||
}
|
||||
|
||||
func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error {
|
||||
return fsm.Handle(&bcReactorMessage{event: ev, data: data})
|
||||
}
|
||||
|
||||
type fsmStepTestValues struct {
|
||||
currentState string
|
||||
event bReactorEvent
|
||||
data bReactorEventData
|
||||
|
||||
wantErr error
|
||||
wantState string
|
||||
wantStatusReqSent bool
|
||||
wantReqIncreased bool
|
||||
wantNewBlocks []int64
|
||||
wantRemovedPeers []p2p.ID
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// helper test function for different FSM events, state and expected behavior
|
||||
func sStopFSMEv(current, expected string) fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: stopFSMEv,
|
||||
wantState: expected,
|
||||
wantErr: errNoErrorFinished}
|
||||
}
|
||||
|
||||
func sUnknownFSMEv(current string) fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: 1234,
|
||||
wantState: current,
|
||||
wantErr: errInvalidEvent}
|
||||
}
|
||||
|
||||
func sStartFSMEv() fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: "unknown",
|
||||
event: startFSMEv,
|
||||
wantState: "waitForPeer",
|
||||
wantStatusReqSent: true}
|
||||
}
|
||||
|
||||
func sStateTimeoutEv(current, expected string, timedoutState string, wantErr error) fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: stateTimeoutEv,
|
||||
data: bReactorEventData{
|
||||
stateName: timedoutState,
|
||||
},
|
||||
wantState: expected,
|
||||
wantErr: wantErr,
|
||||
}
|
||||
}
|
||||
|
||||
func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: processedBlockEv,
|
||||
data: bReactorEventData{
|
||||
err: reactorError,
|
||||
},
|
||||
wantState: expected,
|
||||
wantErr: reactorError,
|
||||
}
|
||||
}
|
||||
|
||||
func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: statusResponseEv,
|
||||
data: bReactorEventData{peerID: peerID, height: height},
|
||||
wantState: expected,
|
||||
wantErr: err}
|
||||
}
|
||||
|
||||
func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: makeRequestsEv,
|
||||
data: bReactorEventData{maxNumRequests: maxPendingRequests},
|
||||
wantState: expected,
|
||||
wantReqIncreased: true,
|
||||
}
|
||||
}
|
||||
|
||||
func sMakeRequestsEvErrored(current, expected string,
|
||||
maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: makeRequestsEv,
|
||||
data: bReactorEventData{maxNumRequests: maxPendingRequests},
|
||||
wantState: expected,
|
||||
wantErr: err,
|
||||
wantRemovedPeers: peersRemoved,
|
||||
wantReqIncreased: true,
|
||||
}
|
||||
}
|
||||
|
||||
func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues {
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: blockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: peerID,
|
||||
height: height,
|
||||
block: types.MakeBlock(height, txs, nil, nil),
|
||||
length: 100},
|
||||
wantState: expected,
|
||||
wantNewBlocks: append(prevBlocks, height),
|
||||
}
|
||||
}
|
||||
|
||||
func sBlockRespEvErrored(current, expected string,
|
||||
peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues {
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: blockResponseEv,
|
||||
data: bReactorEventData{
|
||||
peerID: peerID,
|
||||
height: height,
|
||||
block: types.MakeBlock(height, txs, nil, nil),
|
||||
length: 100},
|
||||
wantState: expected,
|
||||
wantErr: wantErr,
|
||||
wantRemovedPeers: peersRemoved,
|
||||
wantNewBlocks: prevBlocks,
|
||||
}
|
||||
}
|
||||
|
||||
func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues {
|
||||
return fsmStepTestValues{
|
||||
currentState: current,
|
||||
event: peerRemoveEv,
|
||||
data: bReactorEventData{
|
||||
peerID: peerID,
|
||||
err: err,
|
||||
},
|
||||
wantState: expected,
|
||||
wantRemovedPeers: peersRemoved,
|
||||
}
|
||||
}

// --------------------------------------------

func newTestReactor(height int64) *testReactor {
	testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)}
	testBcR.fsm = NewFSM(height, testBcR)
	testBcR.fsm.SetLogger(testBcR.logger)
	return testBcR
}

func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) {
	// There is currently no good way to know to which peer a block request was sent.
	// So in some cases where it does not matter, before we simulate a block response
	// we cheat and look up the peer the block is expected from.
	if step.event == blockResponseEv {
		height := step.data.height
		peerID, ok := testBcR.fsm.pool.blocks[height]
		if ok {
			step.data.peerID = peerID
		}
	}
}

type testFields struct {
	name               string
	startingHeight     int64
	maxRequestsPerPeer int
	maxPendingRequests int
	steps              []fsmStepTestValues
}

func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) {
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create test reactor
|
||||
testBcR := newTestReactor(tt.startingHeight)
|
||||
|
||||
if tt.maxRequestsPerPeer != 0 {
|
||||
maxRequestsPerPeer = tt.maxRequestsPerPeer
|
||||
}
|
||||
|
||||
for _, step := range tt.steps {
|
||||
step := step
|
||||
assert.Equal(t, step.currentState, testBcR.fsm.state.name)
|
||||
|
||||
var heightBefore int64
|
||||
if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
|
||||
heightBefore = testBcR.fsm.pool.Height
|
||||
}
|
||||
oldNumStatusRequests := testBcR.numStatusRequests
|
||||
oldNumBlockRequests := testBcR.numBlockRequests
|
||||
if matchRespToReq {
|
||||
fixBlockResponseEvStep(&step, testBcR)
|
||||
}
|
||||
|
||||
fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
|
||||
assert.Equal(t, step.wantErr, fsmErr)
|
||||
|
||||
if step.wantStatusReqSent {
|
||||
assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
|
||||
} else {
|
||||
assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
|
||||
}
|
||||
|
||||
if step.wantReqIncreased {
|
||||
assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests)
|
||||
} else {
|
||||
assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests)
|
||||
}
|
||||
|
||||
for _, height := range step.wantNewBlocks {
|
||||
_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height)
|
||||
assert.Nil(t, err)
|
||||
}
|
||||
if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
|
||||
heightAfter := testBcR.fsm.pool.Height
|
||||
assert.Equal(t, heightBefore, heightAfter)
|
||||
firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
|
||||
secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
|
||||
assert.NotNil(t, err1)
|
||||
assert.NotNil(t, err2)
|
||||
assert.Nil(t, firstAfter)
|
||||
assert.Nil(t, secondAfter)
|
||||
}
|
||||
|
||||
assert.Equal(t, step.wantState, testBcR.fsm.state.name)
|
||||
|
||||
if step.wantState == "finished" {
|
||||
assert.True(t, testBcR.fsm.isCaughtUp())
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSMBasic(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "one block, one peer - TS2",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 2,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
|
||||
sProcessedBlockEv("waitForBlock", "finished", nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "multi block, multi peer - TS2",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 2,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}),
|
||||
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
sProcessedBlockEv("waitForBlock", "finished", nil),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, true)
|
||||
}
|
||||
|
||||
func TestFSMBlockVerificationFailure(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "block verification failure - TS2 variant",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
|
||||
// add P1 and get blocks 1-3 from it
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),
|
||||
|
||||
// add P2
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
|
||||
|
||||
// process block failure, should remove P1 and all blocks
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure),
|
||||
|
||||
// get blocks 1-3 from P2
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
|
||||
|
||||
// finish after processing blocks 1 and 2
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
sProcessedBlockEv("waitForBlock", "finished", nil),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, false)
|
||||
}
|
||||
|
||||
func TestFSMBadBlockFromPeer(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "block we haven't asked for",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1 and ask for blocks 1-3
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
// blockResponseEv for height 100 should cause an error
|
||||
sBlockRespEvErrored("waitForBlock", "waitForPeer",
|
||||
"P1", 100, []int64{}, errMissingBlock, []p2p.ID{}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "block we already have",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1 and get block 1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock",
|
||||
"P1", 1, []int64{}),
|
||||
|
||||
// Get block 1 again. Since peer is removed together with block 1,
|
||||
// the blocks present in the pool should be {}
|
||||
sBlockRespEvErrored("waitForBlock", "waitForPeer",
|
||||
"P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "block from unknown peer",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1 and get block 1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
|
||||
// get block 1 from unknown peer P2
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEvErrored("waitForBlock", "waitForBlock",
|
||||
"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "block from wrong peer",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1, make requests for blocks 1-3 to P1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
// add P2
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
|
||||
|
||||
// receive block 1 from P2
|
||||
sBlockRespEvErrored("waitForBlock", "waitForBlock",
|
||||
"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, false)
|
||||
}
|
||||
|
||||
func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "block at current height undelivered - TS5",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1, get blocks 1 and 2, process block 1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock",
|
||||
"P1", 1, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock",
|
||||
"P1", 2, []int64{1}),
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
|
||||
// add P2
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
|
||||
|
||||
// timeout on block 3, P1 should be removed
|
||||
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
|
||||
|
||||
// make requests and finish by receiving blocks 2 and 3 from P2
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
|
||||
sProcessedBlockEv("waitForBlock", "finished", nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1, request blocks 1-3 from P1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
// add P2 (tallest)
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
// receive blocks 1-3 from P1
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),
|
||||
|
||||
// process blocks at heights 1 and 2
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
|
||||
// timeout on block at height 4
|
||||
sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, true)
|
||||
}
|
||||
|
||||
func TestFSMPeerRelatedEvents(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "peer remove event with no blocks",
|
||||
startingHeight: 1,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1, P2, P3
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),
|
||||
|
||||
// switch removes P2
|
||||
sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only peer removed while in waitForBlock state",
|
||||
startingHeight: 100,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
|
||||
|
||||
// switch removes P1
|
||||
sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4 ",
|
||||
startingHeight: 100,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1 and make requests
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
// add P2
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),
|
||||
|
||||
// get blocks 100 and 101 from P1 and process block at height 100
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
|
||||
// switch removes peer P1, should be finished
|
||||
sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
|
||||
startingHeight: 100,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1 and make requests
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
// add P2
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),
|
||||
|
||||
// get blocks 100 and 101 from P1
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
|
||||
|
||||
// processed block at heights 100
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
|
||||
// P2 becomes short
|
||||
sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new short peer while in waitForPeer state",
|
||||
startingHeight: 100,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new short peer while in waitForBlock state",
|
||||
startingHeight: 100,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only peer updated with low height while in waitForBlock state",
|
||||
startingHeight: 100,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
|
||||
sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "peer does not exist in the switch",
|
||||
startingHeight: 9999999,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),
|
||||
// send request for block 9999999
|
||||
// Note: For this block request the "switch missing the peer" error is simulated,
|
||||
// see implementation of bcReactor interface, sendBlockRequest(), in this file.
|
||||
sMakeRequestsEvErrored("waitForBlock", "waitForBlock",
|
||||
maxNumRequests, nil, []p2p.ID{"P1"}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, true)
|
||||
}
|
||||
|
||||
func TestFSMStopFSM(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "stopFSMEv in unknown",
|
||||
steps: []fsmStepTestValues{
|
||||
sStopFSMEv("unknown", "finished"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "stopFSMEv in waitForPeer",
|
||||
startingHeight: 1,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStopFSMEv("waitForPeer", "finished"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "stopFSMEv in waitForBlock",
|
||||
startingHeight: 1,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sStopFSMEv("waitForBlock", "finished"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, false)
|
||||
}
|
||||
|
||||
func TestFSMUnknownElements(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "unknown event for state unknown",
|
||||
steps: []fsmStepTestValues{
|
||||
sUnknownFSMEv("unknown"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown event for state waitForPeer",
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sUnknownFSMEv("waitForPeer"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown event for state waitForBlock",
|
||||
startingHeight: 1,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sUnknownFSMEv("waitForBlock"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, false)
|
||||
}
|
||||
|
||||
func TestFSMPeerStateTimeoutEvent(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "timeout event for state waitForPeer while in state waitForPeer - TS1",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "timeout event for state waitForPeer while in a state != waitForPeer",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "timeout event for state waitForBlock while in state waitForBlock ",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "timeout event for state waitForBlock while in a state != waitForBlock",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "timeout event for state waitForBlock with multiple peers",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
|
||||
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, false)
|
||||
}
|
||||
|
||||
func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool,
|
||||
maxRequestsPerPeer int, maxPendingRequests int) testFields {
|
||||
|
||||
// Generate numPeers peers with random or numBlocks heights according to the randomPeerHeights flag.
|
||||
peerHeights := make([]int64, numPeers)
|
||||
for i := 0; i < numPeers; i++ {
|
||||
if i == 0 {
|
||||
peerHeights[0] = numBlocks
|
||||
continue
|
||||
}
|
||||
if randomPeerHeights {
|
||||
peerHeights[i] = int64(tmmath.MaxInt(tmrand.Intn(int(numBlocks)), int(startingHeight)+1))
|
||||
} else {
|
||||
peerHeights[i] = numBlocks
|
||||
}
|
||||
}
|
||||
|
||||
// Approximate the slice capacity to save time for appends.
|
||||
testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))
|
||||
|
||||
testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxNumRequests",
|
||||
numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)
|
||||
|
||||
// Add startFSMEv step.
|
||||
testSteps = append(testSteps, sStartFSMEv())
|
||||
|
||||
// For each peer, add statusResponseEv step.
|
||||
for i := 0; i < numPeers; i++ {
|
||||
peerName := fmt.Sprintf("P%d", i)
|
||||
if i == 0 {
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
|
||||
} else {
|
||||
testSteps = append(testSteps,
|
||||
sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
|
||||
}
|
||||
}
|
||||
|
||||
height := startingHeight
|
||||
numBlocksReceived := 0
|
||||
prevBlocks := make([]int64, 0, maxPendingRequests)
|
||||
|
||||
forLoop:
|
||||
for i := 0; i < int(numBlocks); i++ {
|
||||
|
||||
// Add the makeRequestEv step periodically.
|
||||
if i%maxRequestsPerPeer == 0 {
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
)
|
||||
}
|
||||
|
||||
// Add the blockRespEv step
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sBlockRespEv("waitForBlock", "waitForBlock",
|
||||
"P0", height, prevBlocks))
|
||||
prevBlocks = append(prevBlocks, height)
|
||||
height++
|
||||
numBlocksReceived++
|
||||
|
||||
// Add the processedBlockEv step periodically.
|
||||
if numBlocksReceived >= maxRequestsPerPeer || height >= numBlocks {
|
||||
for j := int(height) - numBlocksReceived; j < int(height); j++ {
|
||||
if j >= int(numBlocks) {
|
||||
// This is the last block that is processed, we should be in "finished" state.
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sProcessedBlockEv("waitForBlock", "finished", nil))
|
||||
break forLoop
|
||||
}
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
|
||||
}
|
||||
numBlocksReceived = 0
|
||||
prevBlocks = make([]int64, 0, maxPendingRequests)
|
||||
}
|
||||
}
|
||||
|
||||
return testFields{
|
||||
name: testName,
|
||||
startingHeight: startingHeight,
|
||||
maxRequestsPerPeer: maxRequestsPerPeer,
|
||||
maxPendingRequests: maxPendingRequests,
|
||||
steps: testSteps,
|
||||
}
|
||||
}

const (
	maxStartingHeightTest       = 100
	maxRequestsPerPeerTest      = 20
	maxTotalPendingRequestsTest = 600
	maxNumPeersTest             = 1000
	maxNumBlocksInChainTest     = 10000 // should be smaller than 9999999
)

func makeCorrectTransitionSequenceWithRandomParameters() testFields {
	// Generate a starting height for fast sync.
	startingHeight := int64(tmrand.Intn(maxStartingHeightTest) + 1)

	// Generate the number of requests per peer.
	maxRequestsPerPeer := tmrand.Intn(maxRequestsPerPeerTest) + 1

	// Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
	maxPendingRequests := tmrand.Intn(maxTotalPendingRequestsTest-maxRequestsPerPeer) + maxRequestsPerPeer

	// Generate the number of blocks to be synced.
	numBlocks := int64(tmrand.Intn(maxNumBlocksInChainTest)) + startingHeight

	// Generate a number of peers.
	numPeers := tmrand.Intn(maxNumPeersTest) + 1

	return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests)
}

func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool {
	if step.event == processedBlockEv {
		_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
		if err == errMissingBlock {
			return false
		}
		_, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
		if err == errMissingBlock {
			return false
		}
	}
	return true
}

func TestFSMCorrectTransitionSequences(t *testing.T) {
|
||||
|
||||
tests := []testFields{
|
||||
makeCorrectTransitionSequence(1, 100, 10, true, 10, 40),
|
||||
makeCorrectTransitionSequenceWithRandomParameters(),
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create test reactor
|
||||
testBcR := newTestReactor(tt.startingHeight)
|
||||
|
||||
if tt.maxRequestsPerPeer != 0 {
|
||||
maxRequestsPerPeer = tt.maxRequestsPerPeer
|
||||
}
|
||||
|
||||
for _, step := range tt.steps {
|
||||
step := step
|
||||
assert.Equal(t, step.currentState, testBcR.fsm.state.name)
|
||||
|
||||
oldNumStatusRequests := testBcR.numStatusRequests
|
||||
fixBlockResponseEvStep(&step, testBcR)
|
||||
if !shouldApplyProcessedBlockEvStep(&step, testBcR) {
|
||||
continue
|
||||
}
|
||||
|
||||
fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
|
||||
assert.Equal(t, step.wantErr, fsmErr)
|
||||
|
||||
if step.wantStatusReqSent {
|
||||
assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
|
||||
} else {
|
||||
assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
|
||||
}
|
||||
|
||||
assert.Equal(t, step.wantState, testBcR.fsm.state.name)
|
||||
if step.wantState == "finished" {
|
||||
assert.True(t, testBcR.fsm.isCaughtUp())
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}

// ----------------------------------------
// implements the bcRNotifier
func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) {
	testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err)
	testR.lastPeerError.peerID = peerID
	testR.lastPeerError.err = err
}

func (testR *testReactor) sendStatusRequest() {
	testR.logger.Info("Reactor received sendStatusRequest call from FSM")
	testR.numStatusRequests++
}

func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height)
	testR.numBlockRequests++
	testR.lastBlockRequest.peerID = peerID
	testR.lastBlockRequest.height = height
	if height == 9999999 {
		// simulate switch does not have peer
		return errNilPeerForBlockRequest
	}
	return nil
}

func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout)
	if _, ok := testR.stateTimerStarts[name]; !ok {
		testR.stateTimerStarts[name] = 1
	} else {
		testR.stateTimerStarts[name]++
	}
}

func (testR *testReactor) switchToConsensus() {
}

// ----------------------------------------
@@ -1,365 +0,0 @@
package v1

import (
	"fmt"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	dbm "github.com/tendermint/tm-db"

	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mempool/mock"
	"github.com/tendermint/tendermint/p2p"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
)

var config *cfg.Config

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
|
||||
validators := make([]types.GenesisValidator, numValidators)
|
||||
privValidators := make([]types.PrivValidator, numValidators)
|
||||
for i := 0; i < numValidators; i++ {
|
||||
val, privVal := types.RandValidator(randPower, minPower)
|
||||
validators[i] = types.GenesisValidator{
|
||||
PubKey: val.PubKey,
|
||||
Power: val.VotingPower,
|
||||
}
|
||||
privValidators[i] = privVal
|
||||
}
|
||||
sort.Sort(types.PrivValidatorsByAddress(privValidators))
|
||||
|
||||
return &types.GenesisDoc{
|
||||
GenesisTime: tmtime.Now(),
|
||||
ChainID: config.ChainID(),
|
||||
Validators: validators,
|
||||
}, privValidators
|
||||
}
|
||||
|
||||
func makeVote(
|
||||
t *testing.T,
|
||||
header *types.Header,
|
||||
blockID types.BlockID,
|
||||
valset *types.ValidatorSet,
|
||||
privVal types.PrivValidator) *types.Vote {
|
||||
|
||||
pubKey, err := privVal.GetPubKey()
|
||||
require.NoError(t, err)
|
||||
|
||||
valIdx, _ := valset.GetByAddress(pubKey.Address())
|
||||
vote := &types.Vote{
|
||||
ValidatorAddress: pubKey.Address(),
|
||||
ValidatorIndex: valIdx,
|
||||
Height: header.Height,
|
||||
Round: 1,
|
||||
Timestamp: tmtime.Now(),
|
||||
Type: tmproto.PrecommitType,
|
||||
BlockID: blockID,
|
||||
}
|
||||
|
||||
vpb := vote.ToProto()
|
||||
|
||||
_ = privVal.SignVote(header.ChainID, vpb)
|
||||
vote.Signature = vpb.Signature
|
||||
|
||||
return vote
|
||||
}
|
||||
|
||||
type BlockchainReactorPair struct {
|
||||
bcR *BlockchainReactor
|
||||
conR *consensusReactorTest
|
||||
}
|
||||
|
||||
func newBlockchainReactor(
|
||||
t *testing.T,
|
||||
logger log.Logger,
|
||||
genDoc *types.GenesisDoc,
|
||||
privVals []types.PrivValidator,
|
||||
maxBlockHeight int64) *BlockchainReactor {
|
||||
if len(privVals) != 1 {
|
||||
panic("only support one validator")
|
||||
}
|
||||
|
||||
app := &testApp{}
|
||||
cc := proxy.NewLocalClientCreator(app)
|
||||
proxyApp := proxy.NewAppConns(cc)
|
||||
err := proxyApp.Start()
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error start app: %w", err))
|
||||
}
|
||||
|
||||
blockDB := dbm.NewMemDB()
|
||||
stateDB := dbm.NewMemDB()
|
||||
stateStore := sm.NewStore(stateDB)
|
||||
blockStore := store.NewBlockStore(blockDB)
|
||||
|
||||
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
|
||||
}
|
||||
|
||||
// Make the BlockchainReactor itself.
|
||||
// NOTE we have to create and commit the blocks first because
|
||||
// pool.height is determined from the store.
|
||||
fastSync := true
|
||||
db := dbm.NewMemDB()
|
||||
stateStore = sm.NewStore(db)
|
||||
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
|
||||
mock.Mempool{}, sm.EmptyEvidencePool{})
|
||||
if err = stateStore.Save(state); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
// let's add some blocks in
|
||||
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
|
||||
lastCommit := types.NewCommit(blockHeight-1, 1, types.BlockID{}, nil)
|
||||
if blockHeight > 1 {
|
||||
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
|
||||
lastBlock := blockStore.LoadBlock(blockHeight - 1)
|
||||
|
||||
vote := makeVote(t, &lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0])
|
||||
lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
|
||||
}
|
||||
|
||||
thisBlock := makeBlock(blockHeight, state, lastCommit)
|
||||
|
||||
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
|
||||
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
|
||||
|
||||
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
|
||||
if err != nil {
|
||||
panic(fmt.Errorf("error apply block: %w", err))
|
||||
}
|
||||
|
||||
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
|
||||
}
|
||||
|
||||
bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
|
||||
bcReactor.SetLogger(logger.With("module", "blockchain"))
|
||||
|
||||
return bcReactor
|
||||
}
|
||||
|
||||
func newBlockchainReactorPair(
|
||||
t *testing.T,
|
||||
logger log.Logger,
|
||||
genDoc *types.GenesisDoc,
|
||||
privVals []types.PrivValidator,
|
||||
maxBlockHeight int64) BlockchainReactorPair {
|
||||
|
||||
consensusReactor := &consensusReactorTest{}
|
||||
consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor)
|
||||
|
||||
return BlockchainReactorPair{
|
||||
newBlockchainReactor(t, logger, genDoc, privVals, maxBlockHeight),
|
||||
consensusReactor}
|
||||
}
|
||||
|
||||
type consensusReactorTest struct {
|
||||
p2p.BaseReactor // BaseService + p2p.Switch
|
||||
switchedToConsensus bool
|
||||
mtx sync.Mutex
|
||||
}
|
||||
|
||||
func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced bool) {
|
||||
conR.mtx.Lock()
|
||||
defer conR.mtx.Unlock()
|
||||
conR.switchedToConsensus = true
|
||||
}
|
||||
|
||||
func TestFastSyncNoBlockResponse(t *testing.T) {
|
||||
|
||||
config = cfg.ResetTestRoot("blockchain_new_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
|
||||
maxBlockHeight := int64(65)
|
||||
|
||||
reactorPairs := make([]BlockchainReactorPair, 2)
|
||||
|
||||
logger := log.TestingLogger()
|
||||
reactorPairs[0] = newBlockchainReactorPair(t, logger, genDoc, privVals, maxBlockHeight)
|
||||
reactorPairs[1] = newBlockchainReactorPair(t, logger, genDoc, privVals, 0)
|
||||
|
||||
p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
|
||||
s.AddReactor("CONSENSUS", reactorPairs[i].conR)
|
||||
moduleName := fmt.Sprintf("blockchain-%v", i)
|
||||
reactorPairs[i].bcR.SetLogger(logger.With("module", moduleName))
|
||||
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
defer func() {
|
||||
for _, r := range reactorPairs {
|
||||
_ = r.bcR.Stop()
|
||||
_ = r.conR.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
tests := []struct {
|
||||
height int64
|
||||
existent bool
|
||||
}{
|
||||
{maxBlockHeight + 2, false},
|
||||
{10, true},
|
||||
{1, true},
|
||||
{maxBlockHeight + 100, false},
|
||||
}
|
||||
|
||||
for {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
reactorPairs[1].conR.mtx.Lock()
|
||||
if reactorPairs[1].conR.switchedToConsensus {
|
||||
reactorPairs[1].conR.mtx.Unlock()
|
||||
break
|
||||
}
|
||||
reactorPairs[1].conR.mtx.Unlock()
|
||||
}
|
||||
|
||||
assert.Equal(t, maxBlockHeight, reactorPairs[0].bcR.store.Height())
|
||||
|
||||
for _, tt := range tests {
|
||||
block := reactorPairs[1].bcR.store.LoadBlock(tt.height)
|
||||
if tt.existent {
|
||||
assert.True(t, block != nil)
|
||||
} else {
|
||||
assert.True(t, block == nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// NOTE: This is too hard to test without
|
||||
// an easy way to add test peer to switch
|
||||
// or without significant refactoring of the module.
|
||||
// Alternatively we could actually dial a TCP conn but
|
||||
// that seems extreme.
|
||||
func TestFastSyncBadBlockStopsPeer(t *testing.T) {
|
||||
numNodes := 4
|
||||
maxBlockHeight := int64(148)
|
||||
|
||||
config = cfg.ResetTestRoot("blockchain_reactor_test")
|
||||
defer os.RemoveAll(config.RootDir)
|
||||
genDoc, privVals := randGenesisDoc(1, false, 30)
|
||||
|
||||
otherChain := newBlockchainReactorPair(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight)
|
||||
defer func() {
|
||||
_ = otherChain.bcR.Stop()
|
||||
_ = otherChain.conR.Stop()
|
||||
}()
|
||||
|
||||
reactorPairs := make([]BlockchainReactorPair, numNodes)
|
||||
logger := make([]log.Logger, numNodes)
|
||||
|
||||
for i := 0; i < numNodes; i++ {
|
||||
logger[i] = log.TestingLogger()
|
||||
height := int64(0)
|
||||
if i == 0 {
|
||||
height = maxBlockHeight
|
||||
}
|
||||
reactorPairs[i] = newBlockchainReactorPair(t, logger[i], genDoc, privVals, height)
|
||||
}
|
||||
|
||||
switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
reactorPairs[i].conR.mtx.Lock()
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
|
||||
s.AddReactor("CONSENSUS", reactorPairs[i].conR)
|
||||
moduleName := fmt.Sprintf("blockchain-%v", i)
|
||||
reactorPairs[i].bcR.SetLogger(logger[i].With("module", moduleName))
|
||||
reactorPairs[i].conR.mtx.Unlock()
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
defer func() {
|
||||
for _, r := range reactorPairs {
|
||||
_ = r.bcR.Stop()
|
||||
_ = r.conR.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
outerFor:
|
||||
for {
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
for i := 0; i < numNodes; i++ {
|
||||
reactorPairs[i].conR.mtx.Lock()
|
||||
if !reactorPairs[i].conR.switchedToConsensus {
|
||||
reactorPairs[i].conR.mtx.Unlock()
|
||||
continue outerFor
|
||||
}
|
||||
reactorPairs[i].conR.mtx.Unlock()
|
||||
}
|
||||
break
|
||||
}
|
||||
|
||||
//at this time, reactors[0-3] is the newest
|
||||
assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size())
|
||||
|
||||
//mark last reactorPair as an invalid peer
|
||||
reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store
|
||||
|
||||
lastLogger := log.TestingLogger()
|
||||
lastReactorPair := newBlockchainReactorPair(t, lastLogger, genDoc, privVals, 0)
|
||||
reactorPairs = append(reactorPairs, lastReactorPair)
|
||||
|
||||
switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].bcR)
|
||||
s.AddReactor("CONSENSUS", reactorPairs[len(reactorPairs)-1].conR)
|
||||
moduleName := fmt.Sprintf("blockchain-%v", len(reactorPairs)-1)
|
||||
reactorPairs[len(reactorPairs)-1].bcR.SetLogger(lastLogger.With("module", moduleName))
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)...)
|
||||
|
||||
for i := 0; i < len(reactorPairs)-1; i++ {
|
||||
p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
for {
|
||||
time.Sleep(1 * time.Second)
|
||||
lastReactorPair.conR.mtx.Lock()
|
||||
if lastReactorPair.conR.switchedToConsensus {
|
||||
lastReactorPair.conR.mtx.Unlock()
|
||||
break
|
||||
}
|
||||
lastReactorPair.conR.mtx.Unlock()
|
||||
|
||||
if lastReactorPair.bcR.Switch.Peers().Size() == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// utility funcs
|
||||
|
||||
func makeTxs(height int64) (txs []types.Tx) {
|
||||
for i := 0; i < 10; i++ {
|
||||
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
|
||||
type testApp struct {
|
||||
abci.BaseApplication
|
||||
}
|
||||
@@ -1,21 +1,26 @@
|
||||
package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"errors"
|
||||
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type iIO interface {
|
||||
sendBlockRequest(peerID p2p.ID, height int64) error
|
||||
sendBlockToPeer(block *types.Block, peerID p2p.ID) error
|
||||
sendBlockNotFound(height int64, peerID p2p.ID) error
|
||||
sendStatusResponse(base, height int64, peerID p2p.ID) error
|
||||
var (
|
||||
errPeerQueueFull = errors.New("peer queue full")
|
||||
)
|
||||
|
||||
type iIO interface {
|
||||
sendBlockRequest(peer p2p.Peer, height int64) error
|
||||
sendBlockToPeer(block *types.Block, peer p2p.Peer) error
|
||||
sendBlockNotFound(height int64, peer p2p.Peer) error
|
||||
sendStatusResponse(base, height int64, peer p2p.Peer) error
|
||||
|
||||
sendStatusRequest(peer p2p.Peer) error
|
||||
broadcastStatusRequest() error
|
||||
|
||||
trySwitchToConsensus(state state.State, skipWAL bool) bool
|
||||
@@ -42,46 +47,50 @@ type consensusReactor interface {
|
||||
SwitchToConsensus(state state.State, skipWAL bool)
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
peer := sio.sw.Peers().Get(peerID)
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found")
|
||||
func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_BlockRequest{
|
||||
BlockRequest: &bcproto.BlockRequest{
|
||||
Height: height,
|
||||
},
|
||||
},
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockRequest{Height: height})
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
queued := peer.TrySend(BlockchainChannel, msgBytes)
|
||||
if !queued {
|
||||
return fmt.Errorf("send queue full")
|
||||
return errPeerQueueFull
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendStatusResponse(base int64, height int64, peerID p2p.ID) error {
|
||||
peer := sio.sw.Peers().Get(peerID)
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found")
|
||||
func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_StatusResponse{
|
||||
StatusResponse: &bcproto.StatusResponse{
|
||||
Height: height,
|
||||
Base: base,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusResponse{Height: height, Base: base})
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
return errPeerQueueFull
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
|
||||
peer := sio.sw.Peers().Get(peerID)
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found")
|
||||
}
|
||||
func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error {
|
||||
if block == nil {
|
||||
panic("trying to send nil block")
|
||||
}
|
||||
@@ -91,29 +100,42 @@ func (sio *switchIO) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
|
||||
return err
|
||||
}
|
||||
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.BlockResponse{Block: bpb})
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_BlockResponse{
|
||||
BlockResponse: &bcproto.BlockResponse{
|
||||
Block: bpb,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
return errPeerQueueFull
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error {
|
||||
peer := sio.sw.Peers().Get(peerID)
|
||||
if peer == nil {
|
||||
return fmt.Errorf("peer not found")
|
||||
func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_NoBlockResponse{
|
||||
NoBlockResponse: &bcproto.NoBlockResponse{
|
||||
Height: height,
|
||||
},
|
||||
},
|
||||
}
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.NoBlockResponse{Height: height})
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return fmt.Errorf("peer queue full")
|
||||
return errPeerQueueFull
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -127,8 +149,33 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool
|
||||
return ok
|
||||
}
|
||||
|
||||
func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error {
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_StatusRequest{
|
||||
StatusRequest: &bcproto.StatusRequest{},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued {
|
||||
return errPeerQueueFull
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *switchIO) broadcastStatusRequest() error {
|
||||
msgBytes, err := bc.EncodeMsg(&bcproto.StatusRequest{})
|
||||
msgProto := &bcproto.Message{
|
||||
Sum: &bcproto.Message_StatusRequest{
|
||||
StatusRequest: &bcproto.StatusRequest{},
|
||||
},
|
||||
}
|
||||
|
||||
msgBytes, err := proto.Marshal(msgProto)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -13,15 +13,24 @@ import (
|
||||
type pcBlockVerificationFailure struct {
|
||||
priorityNormal
|
||||
height int64
|
||||
firstPeerID p2p.ID
|
||||
secondPeerID p2p.ID
|
||||
firstPeerID p2p.NodeID
|
||||
secondPeerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e pcBlockVerificationFailure) String() string {
|
||||
return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}",
|
||||
e.height, e.firstPeerID, e.secondPeerID)
|
||||
}
|
||||
|
||||
// successful block execution
|
||||
type pcBlockProcessed struct {
|
||||
priorityNormal
|
||||
height int64
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (e pcBlockProcessed) String() string {
|
||||
return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID)
|
||||
}
|
||||
|
||||
// processor has finished
|
||||
@@ -37,7 +46,7 @@ func (p pcFinished) Error() string {
|
||||
|
||||
type queueItem struct {
|
||||
block *types.Block
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
type blockQueue map[int64]queueItem
|
||||
@@ -86,10 +95,13 @@ func (state *pcState) synced() bool {
|
||||
return len(state.queue) <= 1
|
||||
}
|
||||
|
||||
func (state *pcState) enqueue(peerID p2p.ID, block *types.Block, height int64) {
|
||||
if _, ok := state.queue[height]; ok {
|
||||
panic("duplicate block enqueued by processor")
|
||||
func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
|
||||
if item, ok := state.queue[height]; ok {
|
||||
panic(fmt.Sprintf(
|
||||
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
|
||||
height, block.Hash(), peerID, item.block.Hash(), item.peerID))
|
||||
}
|
||||
|
||||
state.queue[height] = queueItem{block: block, peerID: peerID}
|
||||
}
|
||||
|
||||
@@ -98,7 +110,7 @@ func (state *pcState) height() int64 {
|
||||
}
|
||||
|
||||
// purgePeer moves all unprocessed blocks from the queue
|
||||
func (state *pcState) purgePeer(peerID p2p.ID) {
|
||||
func (state *pcState) purgePeer(peerID p2p.NodeID) {
|
||||
// what if height is less than state.height?
|
||||
for height, item := range state.queue {
|
||||
if item.peerID == peerID {
|
||||
@@ -145,16 +157,20 @@ func (state *pcState) handle(event Event) (Event, error) {
|
||||
}
|
||||
return noOp, nil
|
||||
}
|
||||
first, second := firstItem.block, secondItem.block
|
||||
|
||||
firstParts := first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstPartSetHeader := firstParts.Header()
|
||||
firstID := types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
|
||||
var (
|
||||
first, second = firstItem.block, secondItem.block
|
||||
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
|
||||
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
|
||||
)
|
||||
|
||||
// verify if +second+ last commit "confirms" +first+ block
|
||||
err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit)
|
||||
if err != nil {
|
||||
state.purgePeer(firstItem.peerID)
|
||||
state.purgePeer(secondItem.peerID)
|
||||
if firstItem.peerID != secondItem.peerID {
|
||||
state.purgePeer(secondItem.peerID)
|
||||
}
|
||||
return pcBlockVerificationFailure{
|
||||
height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID},
|
||||
nil
|
||||
@@ -170,7 +186,6 @@ func (state *pcState) handle(event Event) (Event, error) {
|
||||
state.blocksSynced++
|
||||
|
||||
return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil
|
||||
|
||||
}
|
||||
|
||||
return noOp, nil
|
||||
|
||||
@@ -40,7 +40,7 @@ func makeState(p *params) *pcState {
|
||||
state := newPcState(context)
|
||||
|
||||
for _, item := range p.items {
|
||||
state.enqueue(p2p.ID(item.pid), makePcBlock(item.height), item.height)
|
||||
state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
|
||||
}
|
||||
|
||||
state.blocksSynced = p.blocksSynced
|
||||
@@ -48,7 +48,7 @@ func makeState(p *params) *pcState {
|
||||
return state
|
||||
}
|
||||
|
||||
func mBlockResponse(peerID p2p.ID, height int64) scBlockReceived {
|
||||
func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
|
||||
return scBlockReceived{
|
||||
peerID: peerID,
|
||||
block: makePcBlock(height),
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
proto "github.com/gogo/protobuf/proto"
|
||||
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
@@ -47,11 +49,6 @@ type BlockchainReactor struct {
|
||||
store blockStore
|
||||
}
|
||||
|
||||
//nolint:unused,deadcode
|
||||
type blockVerifier interface {
|
||||
VerifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
|
||||
}
|
||||
|
||||
type blockApplier interface {
|
||||
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
|
||||
}
|
||||
@@ -187,7 +184,7 @@ type rTryPrunePeer struct {
|
||||
}
|
||||
|
||||
func (e rTryPrunePeer) String() string {
|
||||
return fmt.Sprintf(": %v", e.time)
|
||||
return fmt.Sprintf("rTryPrunePeer{%v}", e.time)
|
||||
}
|
||||
|
||||
// ticker event for scheduling block requests
|
||||
@@ -197,7 +194,7 @@ type rTrySchedule struct {
|
||||
}
|
||||
|
||||
func (e rTrySchedule) String() string {
|
||||
return fmt.Sprintf(": %v", e.time)
|
||||
return fmt.Sprintf("rTrySchedule{%v}", e.time)
|
||||
}
|
||||
|
||||
// ticker for block processing
|
||||
@@ -205,52 +202,83 @@ type rProcessBlock struct {
|
||||
priorityNormal
|
||||
}
|
||||
|
||||
func (e rProcessBlock) String() string {
|
||||
return "rProcessBlock"
|
||||
}
|
||||
|
||||
// reactor generated events based on blockchain related messages from peers:
|
||||
// blockResponse message received from a peer
|
||||
type bcBlockResponse struct {
|
||||
priorityNormal
|
||||
time time.Time
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
size int64
|
||||
block *types.Block
|
||||
}
|
||||
|
||||
func (resp bcBlockResponse) String() string {
|
||||
return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}",
|
||||
resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time)
|
||||
}
|
||||
|
||||
// blockNoResponse message received from a peer
|
||||
type bcNoBlockResponse struct {
|
||||
priorityNormal
|
||||
time time.Time
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
height int64
|
||||
}
|
||||
|
||||
func (resp bcNoBlockResponse) String() string {
|
||||
return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}",
|
||||
resp.peerID, resp.height, resp.time)
|
||||
}
|
||||
|
||||
// statusResponse message received from a peer
|
||||
type bcStatusResponse struct {
|
||||
priorityNormal
|
||||
time time.Time
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
base int64
|
||||
height int64
|
||||
}
|
||||
|
||||
func (resp bcStatusResponse) String() string {
|
||||
return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}",
|
||||
resp.peerID, resp.height, resp.base, resp.time)
|
||||
}
|
||||
|
||||
// new peer is connected
|
||||
type bcAddNewPeer struct {
|
||||
priorityNormal
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
|
||||
func (resp bcAddNewPeer) String() string {
|
||||
return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID)
|
||||
}
|
||||
|
||||
// existing peer is removed
|
||||
type bcRemovePeer struct {
|
||||
priorityHigh
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
reason interface{}
|
||||
}
|
||||
|
||||
func (resp bcRemovePeer) String() string {
|
||||
return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason)
|
||||
}
|
||||
|
||||
// resets the scheduler and processor state, e.g. following a switch from state syncing
|
||||
type bcResetState struct {
|
||||
priorityHigh
|
||||
state state.State
|
||||
}
|
||||
|
||||
func (e bcResetState) String() string {
|
||||
return fmt.Sprintf("bcResetState{%v}", e.state)
|
||||
}
|
||||
|
||||
// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
var lastRate = 0.0
@@ -285,6 +313,9 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
defer doStatusTk.Stop()
doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers

// Memoize the scSchedulerFail error to avoid printing it every scheduleFreq.
var scSchedulerFailErr error

// XXX: Extract timers to make testing atemporal
for {
select {
@@ -349,15 +380,27 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
r.logger.Error("Error reporting peer", "err", err)
}
case scBlockRequest:
if err := r.io.sendBlockRequest(event.peerID, event.height); err != nil {
peer := r.Switch.Peers().Get(event.peerID)
if peer == nil {
r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID)
continue
}
if err := r.io.sendBlockRequest(peer, event.height); err != nil {
r.logger.Error("Error sending block request", "err", err)
}
case scFinishedEv:
r.processor.send(event)
r.scheduler.stop()
case scSchedulerFail:
r.logger.Error("Scheduler failure", "err", event.reason.Error())
if scSchedulerFailErr != event.reason {
r.logger.Error("Scheduler failure", "err", event.reason.Error())
scSchedulerFailErr = event.reason
}
case scPeersPruned:
// Remove peers from the processor.
for _, peerID := range event.peers {
r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")})
}
r.logger.Debug("Pruned peers", "count", len(event.peers))
case noOpEvent:
default:
@@ -420,57 +463,65 @@ func (r *BlockchainReactor) Stop() error {
|
||||
}
|
||||
|
||||
// Receive implements Reactor by handling different message types.
|
||||
// XXX: do not call any methods that can block or incur heavy processing.
|
||||
// https://github.com/tendermint/tendermint/issues/2888
|
||||
func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := bc.DecodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
r.logger.Error("error decoding message",
|
||||
"src", src.ID(), "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
||||
logger := r.logger.With("src", src.ID(), "chID", chID)
|
||||
|
||||
msgProto := new(bcproto.Message)
|
||||
|
||||
if err := proto.Unmarshal(msgBytes, msgProto); err != nil {
|
||||
logger.Error("error decoding message", "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
if err = bc.ValidateMsg(msg); err != nil {
|
||||
r.logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
|
||||
if err := msgProto.Validate(); err != nil {
|
||||
logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
|
||||
r.logger.Debug("Receive", "src", src.ID(), "chID", chID, "msg", msg)
|
||||
r.logger.Debug("received", "msg", msgProto)
|
||||
|
||||
switch msg := msg.(type) {
|
||||
case *bcproto.StatusRequest:
|
||||
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src.ID()); err != nil {
|
||||
r.logger.Error("Could not send status message to peer", "src", src)
|
||||
switch msg := msgProto.Sum.(type) {
|
||||
case *bcproto.Message_StatusRequest:
|
||||
if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil {
|
||||
logger.Error("Could not send status message to src peer")
|
||||
}
|
||||
|
||||
case *bcproto.BlockRequest:
|
||||
block := r.store.LoadBlock(msg.Height)
|
||||
case *bcproto.Message_BlockRequest:
|
||||
block := r.store.LoadBlock(msg.BlockRequest.Height)
|
||||
if block != nil {
|
||||
if err = r.io.sendBlockToPeer(block, src.ID()); err != nil {
|
||||
r.logger.Error("Could not send block message to peer: ", err)
|
||||
if err := r.io.sendBlockToPeer(block, src); err != nil {
|
||||
logger.Error("Could not send block message to src peer", "err", err)
|
||||
}
|
||||
} else {
|
||||
r.logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)
|
||||
peerID := src.ID()
|
||||
if err = r.io.sendBlockNotFound(msg.Height, peerID); err != nil {
|
||||
r.logger.Error("Couldn't send block not found: ", err)
|
||||
logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height)
|
||||
if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil {
|
||||
logger.Error("Couldn't send block not found msg", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
case *bcproto.StatusResponse:
|
||||
case *bcproto.Message_StatusResponse:
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height}
|
||||
r.events <- bcStatusResponse{
|
||||
peerID: src.ID(),
|
||||
base: msg.StatusResponse.Base,
|
||||
height: msg.StatusResponse.Height,
|
||||
}
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
|
||||
case *bcproto.BlockResponse:
|
||||
r.mtx.RLock()
|
||||
bi, err := types.BlockFromProto(msg.Block)
|
||||
case *bcproto.Message_BlockResponse:
|
||||
bi, err := types.BlockFromProto(msg.BlockResponse.Block)
|
||||
if err != nil {
|
||||
r.logger.Error("error transitioning block from protobuf", "err", err)
|
||||
logger.Error("error transitioning block from protobuf", "err", err)
|
||||
_ = r.reporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
|
||||
return
|
||||
}
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcBlockResponse{
|
||||
peerID: src.ID(),
|
||||
@@ -481,10 +532,14 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
|
||||
case *bcproto.NoBlockResponse:
|
||||
case *bcproto.Message_NoBlockResponse:
|
||||
r.mtx.RLock()
|
||||
if r.events != nil {
|
||||
r.events <- bcNoBlockResponse{peerID: src.ID(), height: msg.Height, time: time.Now()}
|
||||
r.events <- bcNoBlockResponse{
|
||||
peerID: src.ID(),
|
||||
height: msg.NoBlockResponse.Height,
|
||||
time: time.Now(),
|
||||
}
|
||||
}
|
||||
r.mtx.RUnlock()
|
||||
}
|
||||
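Stripped of the reactor plumbing, the new decode path in Receive above is: unmarshal the bytes into the bcproto.Message wrapper, validate it, then switch on the Sum oneof. A condensed sketch of that flow, reusing the message names from this diff; the function itself is illustrative only and not part of the change.

// decodeBlockchainMsg is an illustrative sketch of the decode/dispatch flow
// used by Receive above; it is not part of the change itself.
func decodeBlockchainMsg(bz []byte) error {
	msg := new(bcproto.Message)
	if err := proto.Unmarshal(bz, msg); err != nil {
		return fmt.Errorf("decode: %w", err)
	}
	if err := msg.Validate(); err != nil {
		return fmt.Errorf("validate: %w", err)
	}

	switch m := msg.Sum.(type) {
	case *bcproto.Message_StatusRequest:
		// reply with the store's base and height
	case *bcproto.Message_BlockRequest:
		_ = m.BlockRequest.Height // load the block, or answer with a NoBlockResponse
	case *bcproto.Message_StatusResponse, *bcproto.Message_BlockResponse, *bcproto.Message_NoBlockResponse:
		// forwarded into the reactor's event channel for the scheduler
	}
	return nil
}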
@@ -492,10 +547,16 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
|
||||
// AddPeer implements Reactor interface
|
||||
func (r *BlockchainReactor) AddPeer(peer p2p.Peer) {
|
||||
err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer.ID())
|
||||
err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer)
|
||||
if err != nil {
|
||||
r.logger.Error("Could not send status message to peer new", "src", peer.ID, "height", r.SyncHeight())
|
||||
r.logger.Error("could not send our status to the new peer", "peer", peer.ID, "err", err)
|
||||
}
|
||||
|
||||
err = r.io.sendStatusRequest(peer)
|
||||
if err != nil {
|
||||
r.logger.Error("could not send status request to the new peer", "peer", peer.ID, "err", err)
|
||||
}
|
||||
|
||||
r.mtx.RLock()
|
||||
defer r.mtx.RUnlock()
|
||||
if r.events != nil {
|
||||
@@ -520,7 +581,7 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
return []*p2p.ChannelDescriptor{
|
||||
{
|
||||
ID: BlockchainChannel,
|
||||
Priority: 10,
|
||||
Priority: 5,
|
||||
SendQueueCapacity: 2000,
|
||||
RecvBufferCapacity: 50 * 4096,
|
||||
RecvMessageCapacity: bc.MaxMsgSize,
|
||||
|
||||
@@ -9,13 +9,13 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
"github.com/tendermint/tendermint/behaviour"
|
||||
bc "github.com/tendermint/tendermint/blockchain"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
@@ -32,11 +32,11 @@ import (
|
||||
|
||||
type mockPeer struct {
|
||||
service.Service
|
||||
id p2p.ID
|
||||
id p2p.NodeID
|
||||
}
|
||||
|
||||
func (mp mockPeer) FlushStop() {}
|
||||
func (mp mockPeer) ID() p2p.ID { return mp.id }
|
||||
func (mp mockPeer) ID() p2p.NodeID { return mp.id }
|
||||
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
|
||||
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }
|
||||
|
||||
@@ -45,9 +45,9 @@ func (mp mockPeer) IsPersistent() bool { return true }
|
||||
func (mp mockPeer) CloseConn() error { return nil }
|
||||
|
||||
func (mp mockPeer) NodeInfo() p2p.NodeInfo {
|
||||
return p2p.DefaultNodeInfo{
|
||||
DefaultNodeID: "",
|
||||
ListenAddr: "",
|
||||
return p2p.NodeInfo{
|
||||
NodeID: "",
|
||||
ListenAddr: "",
|
||||
}
|
||||
}
|
||||
func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
|
||||
@@ -93,34 +93,37 @@ type mockSwitchIo struct {
|
||||
numStatusResponse int
|
||||
numBlockResponse int
|
||||
numNoBlockResponse int
|
||||
numStatusRequest int
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
var _ iIO = (*mockSwitchIo)(nil)
|
||||
|
||||
func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendStatusResponse(base, height int64, peerID p2p.ID) error {
|
||||
func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.numStatusResponse++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendBlockToPeer(block *types.Block, peerID p2p.ID) error {
|
||||
func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.numBlockResponse++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendBlockNotFound(height int64, peerID p2p.ID) error {
|
||||
func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.numNoBlockResponse++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) bool {
|
||||
func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.switchedToConsensus = true
|
||||
@@ -131,6 +134,13 @@ func (sio *mockSwitchIo) broadcastStatusRequest() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error {
|
||||
sio.mtx.Lock()
|
||||
defer sio.mtx.Unlock()
|
||||
sio.numStatusRequest++
|
||||
return nil
|
||||
}
|
||||
|
||||
type testReactorParams struct {
|
||||
logger log.Logger
|
||||
genDoc *types.GenesisDoc
|
||||
@@ -399,23 +409,37 @@ func TestReactorHelperMode(t *testing.T) {
|
||||
switch ev := step.event.(type) {
|
||||
case bcproto.StatusRequest:
|
||||
old := mockSwitch.numStatusResponse
|
||||
msg, err := bc.EncodeMsg(&ev)
|
||||
assert.NoError(t, err)
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
|
||||
|
||||
msgProto := new(bcproto.Message)
|
||||
require.NoError(t, msgProto.Wrap(&ev))
|
||||
|
||||
msgBz, err := proto.Marshal(msgProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
|
||||
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
|
||||
case bcproto.BlockRequest:
|
||||
if ev.Height > params.startHeight {
|
||||
old := mockSwitch.numNoBlockResponse
|
||||
msg, err := bc.EncodeMsg(&ev)
|
||||
assert.NoError(t, err)
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
|
||||
|
||||
msgProto := new(bcproto.Message)
|
||||
require.NoError(t, msgProto.Wrap(&ev))
|
||||
|
||||
msgBz, err := proto.Marshal(msgProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
|
||||
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
|
||||
} else {
|
||||
old := mockSwitch.numBlockResponse
|
||||
msg, err := bc.EncodeMsg(&ev)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, err)
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.ID(step.peer)}, msg)
|
||||
|
||||
msgProto := new(bcproto.Message)
|
||||
require.NoError(t, msgProto.Wrap(&ev))
|
||||
|
||||
msgBz, err := proto.Marshal(msgProto)
|
||||
require.NoError(t, err)
|
||||
|
||||
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
|
||||
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
|
||||
}
|
||||
}
|
||||
|
||||
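Each branch of the test above repeats the same wrap-and-marshal steps before calling Receive. A small helper capturing that pattern might look like the following; it is a sketch for illustration, not part of the change, and assumes Wrap accepts any proto.Message, as the calls above suggest.

// marshalWrapped wraps a blockchain protobuf message in the bcproto.Message
// envelope and returns its wire bytes, failing the test on any error.
func marshalWrapped(t *testing.T, pb proto.Message) []byte {
	t.Helper()

	wrapper := new(bcproto.Message)
	require.NoError(t, wrapper.Wrap(pb))

	bz, err := proto.Marshal(wrapper)
	require.NoError(t, err)
	return bz
}

With such a helper, reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, marshalWrapped(t, &ev)) would collapse each three-line block above into a single call.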
@@ -2,6 +2,7 @@ package v2
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/Workiva/go-datastructures/queue"
|
||||
@@ -11,6 +12,8 @@ import (

type handleFunc = func(event Event) (Event, error)

const historySize = 25

// Routine is a structure that models a finite state machine as serialized
// stream of events processed by a handle function. This Routine structure
// handles the concurrency and messaging guarantees. Events are sent via
@@ -21,6 +24,7 @@ type Routine struct {
name string
handle handleFunc
queue *queue.PriorityQueue
history []Event
out chan Event
fin chan error
rdy chan struct{}
@@ -34,6 +38,7 @@ func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
name: name,
handle: handleFunc,
queue: queue.NewPriorityQueue(bufferSize, true),
history: make([]Event, 0, historySize),
out: make(chan Event, bufferSize),
rdy: make(chan struct{}, 1),
fin: make(chan error, 1),
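For orientation, this is roughly how such a routine is driven elsewhere in the package: one goroutine runs the loop, callers feed it events, and a demultiplexer drains its output. A minimal sketch assuming the Event, newRoutine, start, send and stop declarations shown in this file; exampleRoutineUsage itself is illustrative and not part of the change.

func exampleRoutineUsage() {
	echo := func(ev Event) (Event, error) {
		// Hand every event straight back to the output channel.
		return ev, nil
	}

	rt := newRoutine("example", echo, 10)
	go rt.start()        // a single goroutine owns the serialized event loop
	<-rt.rdy             // wait for the loop to come up
	rt.send(noOpEvent{}) // events are queued by priority and handled one at a time
	out := <-rt.out      // in practice the outputs are drained by a demux loop
	_ = out
	rt.stop()
}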
@@ -53,13 +58,24 @@ func (rt *Routine) setMetrics(metrics *Metrics) {
|
||||
}
|
||||
|
||||
func (rt *Routine) start() {
|
||||
rt.logger.Info(fmt.Sprintf("%s: run\n", rt.name))
|
||||
rt.logger.Info(fmt.Sprintf("%s: run", rt.name))
|
||||
running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
|
||||
if !running {
|
||||
panic(fmt.Sprintf("%s is already running", rt.name))
|
||||
}
|
||||
close(rt.rdy)
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
var (
|
||||
b strings.Builder
|
||||
j int
|
||||
)
|
||||
for i := len(rt.history) - 1; i >= 0; i-- {
|
||||
fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i])
|
||||
j++
|
||||
}
|
||||
panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String()))
|
||||
}
|
||||
stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
|
||||
if !stopped {
|
||||
panic(fmt.Sprintf("%s is failed to stop", rt.name))
|
||||
@@ -82,7 +98,19 @@ func (rt *Routine) start() {
return
}
rt.metrics.EventsOut.With("routine", rt.name).Add(1)
rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v\n", rt.name, oEvent, oEvent))
rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent))

// Skip rTrySchedule and rProcessBlock events as they clutter the history
// due to their frequency.
switch events[0].(type) {
case rTrySchedule:
case rProcessBlock:
default:
rt.history = append(rt.history, events[0].(Event))
if len(rt.history) > historySize {
rt.history = rt.history[1:]
}
}

rt.out <- oEvent
}
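The history kept above is deliberately bounded so the panic dump in the recover handler stays small. The same append-and-trim technique in isolation, as a sketch independent of this diff:

// boundedHistory keeps at most max events, discarding the oldest first.
type boundedHistory struct {
	max    int
	events []Event
}

func (h *boundedHistory) add(ev Event) {
	h.events = append(h.events, ev)
	if len(h.events) > h.max {
		// Drop the oldest entry so the slice never exceeds max.
		h.events = h.events[1:]
	}
}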
@@ -97,7 +125,7 @@ func (rt *Routine) send(event Event) bool {
|
||||
err := rt.queue.Put(event)
|
||||
if err != nil {
|
||||
rt.metrics.EventsShed.With("routine", rt.name).Add(1)
|
||||
rt.logger.Info(fmt.Sprintf("%s: send failed, queue was full/stopped \n", rt.name))
|
||||
rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name))
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -122,7 +150,7 @@ func (rt *Routine) stop() {
|
||||
return
|
||||
}
|
||||
|
||||
rt.logger.Info(fmt.Sprintf("%s: stop\n", rt.name))
|
||||
rt.logger.Info(fmt.Sprintf("%s: stop", rt.name))
|
||||
rt.queue.Dispose() // this should block until all queue items are free?
|
||||
}
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ package v2
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
@@ -18,35 +19,51 @@ type scFinishedEv struct {
|
||||
reason string
|
||||
}
|
||||
|
||||
func (e scFinishedEv) String() string {
|
||||
return fmt.Sprintf("scFinishedEv{%v}", e.reason)
|
||||
}
|
||||
|
||||
// send a blockRequest message
|
||||
type scBlockRequest struct {
|
||||
priorityNormal
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
height int64
|
||||
}
|
||||
|
||||
func (e scBlockRequest) String() string {
|
||||
return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID)
|
||||
}
|
||||
|
||||
// a block has been received and validated by the scheduler
|
||||
type scBlockReceived struct {
|
||||
priorityNormal
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
block *types.Block
|
||||
}
|
||||
|
||||
func (e scBlockReceived) String() string {
|
||||
return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID)
|
||||
}
|
||||
|
||||
// scheduler detected a peer error
|
||||
type scPeerError struct {
|
||||
priorityHigh
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
reason error
|
||||
}
|
||||
|
||||
func (e scPeerError) String() string {
|
||||
return fmt.Sprintf("scPeerError - peerID %s, err %s", e.peerID, e.reason)
|
||||
return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason)
|
||||
}
|
||||
|
||||
// scheduler removed a set of peers (timed out or slow peer)
|
||||
type scPeersPruned struct {
|
||||
priorityHigh
|
||||
peers []p2p.ID
|
||||
peers []p2p.NodeID
|
||||
}
|
||||
|
||||
func (e scPeersPruned) String() string {
|
||||
return fmt.Sprintf("scPeersPruned{%v}", e.peers)
|
||||
}
|
||||
|
||||
// XXX: make this fatal?
|
||||
@@ -56,6 +73,10 @@ type scSchedulerFail struct {
|
||||
reason error
|
||||
}
|
||||
|
||||
func (e scSchedulerFail) String() string {
|
||||
return fmt.Sprintf("scSchedulerFail{%v}", e.reason)
|
||||
}
|
||||
|
||||
type blockState int
|
||||
|
||||
const (
|
||||
@@ -105,7 +126,7 @@ func (e peerState) String() string {
|
||||
}
|
||||
|
||||
type scPeer struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
|
||||
// initialized as New when peer is added, updated to Ready when statusUpdate is received,
|
||||
// updated to Removed when peer is removed
|
||||
@@ -122,7 +143,7 @@ func (p scPeer) String() string {
|
||||
p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
|
||||
}
|
||||
|
||||
func newScPeer(peerID p2p.ID) *scPeer {
|
||||
func newScPeer(peerID p2p.NodeID) *scPeer {
|
||||
return &scPeer{
|
||||
peerID: peerID,
|
||||
state: peerStateNew,
|
||||
@@ -150,7 +171,7 @@ type scheduler struct {
|
||||
|
||||
// a map of peerID to scheduler specific peer struct `scPeer` used to keep
|
||||
// track of peer specific state
|
||||
peers map[p2p.ID]*scPeer
|
||||
peers map[p2p.NodeID]*scPeer
|
||||
peerTimeout time.Duration // maximum response time from a peer otherwise prune
|
||||
minRecvRate int64 // minimum receive rate from peer otherwise prune
|
||||
|
||||
@@ -162,13 +183,13 @@ type scheduler struct {
|
||||
blockStates map[int64]blockState
|
||||
|
||||
// a map of heights to the peer we are waiting a response from
|
||||
pendingBlocks map[int64]p2p.ID
|
||||
pendingBlocks map[int64]p2p.NodeID
|
||||
|
||||
// the time at which a block was put in blockStatePending
|
||||
pendingTime map[int64]time.Time
|
||||
|
||||
// a map of heights to the peers that put the block in blockStateReceived
|
||||
receivedBlocks map[int64]p2p.ID
|
||||
receivedBlocks map[int64]p2p.NodeID
|
||||
}
|
||||
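To keep the three height-keyed maps straight: a request moves a height into pendingBlocks and pendingTime, a delivery moves it into receivedBlocks, and processing erases it everywhere. A miniature walk-through of that bookkeeping, illustrative only, loosely mirroring the markPending, markReceived and markProcessed handlers further down:

// exampleHeightLifecycle sketches how one height travels through the
// scheduler's maps; it is not part of the change.
func exampleHeightLifecycle(sc *scheduler, h int64, peer p2p.NodeID, now time.Time) {
	// request sent to a peer
	sc.blockStates[h] = blockStatePending
	sc.pendingBlocks[h] = peer
	sc.pendingTime[h] = now

	// block arrives from the same peer
	sc.blockStates[h] = blockStateReceived
	delete(sc.pendingBlocks, h)
	delete(sc.pendingTime, h)
	sc.receivedBlocks[h] = peer

	// block processed: forget everything about this height
	delete(sc.receivedBlocks, h)
	delete(sc.blockStates, h)
}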
|
||||
func (sc scheduler) String() string {
|
||||
@@ -183,26 +204,26 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
|
||||
syncTimeout: 60 * time.Second,
|
||||
height: initHeight,
|
||||
blockStates: make(map[int64]blockState),
|
||||
peers: make(map[p2p.ID]*scPeer),
|
||||
pendingBlocks: make(map[int64]p2p.ID),
|
||||
peers: make(map[p2p.NodeID]*scPeer),
|
||||
pendingBlocks: make(map[int64]p2p.NodeID),
|
||||
pendingTime: make(map[int64]time.Time),
|
||||
receivedBlocks: make(map[int64]p2p.ID),
|
||||
receivedBlocks: make(map[int64]p2p.NodeID),
|
||||
targetPending: 10, // TODO - pass as param
|
||||
peerTimeout: 15 * time.Second, // TODO - pass as param
|
||||
minRecvRate: 0, //int64(7680), TODO - pass as param
|
||||
minRecvRate: 0, // int64(7680), TODO - pass as param
|
||||
}
|
||||
|
||||
return &sc
|
||||
}
|
||||
|
||||
func (sc *scheduler) ensurePeer(peerID p2p.ID) *scPeer {
|
||||
func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
|
||||
if _, ok := sc.peers[peerID]; !ok {
|
||||
sc.peers[peerID] = newScPeer(peerID)
|
||||
}
|
||||
return sc.peers[peerID]
|
||||
}
|
||||
|
||||
func (sc *scheduler) touchPeer(peerID p2p.ID, time time.Time) error {
|
||||
func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
|
||||
peer, ok := sc.peers[peerID]
|
||||
if !ok {
|
||||
return fmt.Errorf("couldn't find peer %s", peerID)
|
||||
@@ -217,14 +238,13 @@ func (sc *scheduler) touchPeer(peerID p2p.ID, time time.Time) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sc *scheduler) removePeer(peerID p2p.ID) error {
|
||||
func (sc *scheduler) removePeer(peerID p2p.NodeID) {
|
||||
peer, ok := sc.peers[peerID]
|
||||
if !ok {
|
||||
return nil
|
||||
return
|
||||
}
|
||||
|
||||
if peer.state == peerStateRemoved {
|
||||
return fmt.Errorf("tried to remove peer %s in peerStateRemoved", peerID)
|
||||
return
|
||||
}
|
||||
|
||||
for height, pendingPeerID := range sc.pendingBlocks {
|
||||
@@ -258,8 +278,6 @@ func (sc *scheduler) removePeer(peerID p2p.ID) error {
|
||||
delete(sc.blockStates, h)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// check if the blockPool is running low and add new blocks in New state to be requested.
|
||||
@@ -280,21 +298,20 @@ func (sc *scheduler) addNewBlocks() {
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error {
|
||||
func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
|
||||
peer := sc.ensurePeer(peerID)
|
||||
|
||||
if peer.state == peerStateRemoved {
|
||||
return fmt.Errorf("cannot set peer height for a peer in peerStateRemoved")
|
||||
return nil // noop
|
||||
}
|
||||
|
||||
if height < peer.height {
|
||||
if err := sc.removePeer(peerID); err != nil {
|
||||
return err
|
||||
}
|
||||
sc.removePeer(peerID)
|
||||
return fmt.Errorf("cannot move peer height lower. from %d to %d", peer.height, height)
|
||||
}
|
||||
|
||||
if base > height {
|
||||
sc.removePeer(peerID)
|
||||
return fmt.Errorf("cannot set peer base higher than its height")
|
||||
}
|
||||
|
||||
@@ -316,8 +333,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState {
|
||||
}
|
||||
}
|
||||
|
||||
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.ID {
|
||||
peers := make([]p2p.ID, 0)
|
||||
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
|
||||
peers := make([]p2p.NodeID, 0)
|
||||
for _, peer := range sc.peers {
|
||||
if peer.state != peerStateReady {
|
||||
continue
|
||||
@@ -329,8 +346,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []p2p.ID {
|
||||
return peers
|
||||
}
|
||||
|
||||
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.ID {
|
||||
prunable := make([]p2p.ID, 0)
|
||||
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
|
||||
prunable := make([]p2p.NodeID, 0)
|
||||
for peerID, peer := range sc.peers {
|
||||
if peer.state != peerStateReady {
|
||||
continue
|
||||
@@ -348,15 +365,9 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
|
||||
sc.blockStates[height] = state
|
||||
}
|
||||
|
||||
func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
|
||||
peer, ok := sc.peers[peerID]
|
||||
if !ok {
|
||||
return fmt.Errorf("received block from unknown peer %s", peerID)
|
||||
}
|
||||
|
||||
if peer.state != peerStateReady {
|
||||
return fmt.Errorf("cannot receive blocks from not ready peer %s", peerID)
|
||||
}
|
||||
// CONTRACT: peer exists and in Ready state.
|
||||
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
|
||||
peer := sc.peers[peerID]
|
||||
|
||||
if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
|
||||
return fmt.Errorf("received block %d from peer %s without being requested", height, peerID)
|
||||
@@ -379,7 +390,7 @@ func (sc *scheduler) markReceived(peerID p2p.ID, height int64, size int64, now t
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) error {
|
||||
func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
|
||||
state := sc.getStateAtHeight(height)
|
||||
if state != blockStateNew {
|
||||
return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
|
||||
@@ -412,17 +423,17 @@ func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) er
|
||||
}
|
||||
|
||||
func (sc *scheduler) markProcessed(height int64) error {
// It is possible that a peer error or timeout is handled after the processor
// has processed the block but before the scheduler received this event, so
// when pcBlockProcessed event is received, the block had been requested
// again => don't check the block state.
sc.lastAdvance = time.Now()
state := sc.getStateAtHeight(height)
if state != blockStateReceived {
return fmt.Errorf("cannot mark height %d received from block state %s", height, state)
}

sc.height++
sc.height = height + 1
delete(sc.pendingBlocks, height)
delete(sc.pendingTime, height)
delete(sc.receivedBlocks, height)
delete(sc.blockStates, height)
sc.addNewBlocks()

return nil
}

|
||||
@@ -461,7 +472,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 {
|
||||
return min
|
||||
}
|
||||
|
||||
func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 {
|
||||
func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
|
||||
var heights []int64
|
||||
for height, pendingPeerID := range sc.pendingBlocks {
|
||||
if pendingPeerID == peerID {
|
||||
@@ -471,7 +482,7 @@ func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 {
|
||||
return heights
|
||||
}
|
||||
|
||||
func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) {
|
||||
func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
|
||||
peers := sc.getPeersWithHeight(height)
|
||||
if len(peers) == 0 {
|
||||
return "", fmt.Errorf("cannot find peer for height %d", height)
|
||||
@@ -479,7 +490,7 @@ func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) {
|
||||
|
||||
// create a map from number of pending requests to a list
|
||||
// of peers having that number of pending requests.
|
||||
pendingFrom := make(map[int][]p2p.ID)
|
||||
pendingFrom := make(map[int][]p2p.NodeID)
|
||||
for _, peerID := range peers {
|
||||
numPending := len(sc.pendingFrom(peerID))
|
||||
pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
|
||||
@@ -498,7 +509,7 @@ func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) {
|
||||
}
|
||||
|
||||
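The comment above captures the selection rule: group the candidate peers by how many requests are already pending on them and draw from the least-loaded group, sorted by ID for determinism. The grouping half of that rule on its own, as an illustrative sketch; leastLoaded is not part of the change.

// leastLoaded returns the group of peers with the fewest pending requests,
// given the numPending -> peers map built the same way as in selectPeer.
func leastLoaded(pendingFrom map[int][]p2p.NodeID) []p2p.NodeID {
	best := -1
	for n := range pendingFrom {
		if best == -1 || n < best {
			best = n
		}
	}
	if best == -1 {
		return nil
	}
	return pendingFrom[best]
}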
// PeerByID is a list of peers sorted by peerID.
type PeerByID []p2p.ID
type PeerByID []p2p.NodeID

func (peers PeerByID) Len() int {
return len(peers)
@@ -508,9 +519,7 @@ func (peers PeerByID) Less(i, j int) bool {
}

func (peers PeerByID) Swap(i, j int) {
it := peers[i]
peers[i] = peers[j]
peers[j] = it
peers[i], peers[j] = peers[j], peers[i]
}

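PeerByID exists so the scheduler can order candidate peers deterministically before picking one. A minimal usage sketch, assuming the sort package already imported by this file and the p2p.NodeID type used throughout the change; sortedPeers is illustrative only.

// sortedPeers returns a copy of ids ordered by PeerByID's Less method,
// giving the scheduler a stable order to select from.
func sortedPeers(ids []p2p.NodeID) []p2p.NodeID {
	out := make([]p2p.NodeID, len(ids))
	copy(out, ids)
	sort.Sort(PeerByID(out))
	return out
}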
// Handlers
|
||||
@@ -519,12 +528,13 @@ func (peers PeerByID) Swap(i, j int) {
|
||||
func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
|
||||
err := sc.touchPeer(event.peerID, event.time)
|
||||
if err != nil {
|
||||
return scPeerError{peerID: event.peerID, reason: err}, nil
|
||||
// peer does not exist OR not ready
|
||||
return noOp, nil
|
||||
}
|
||||
|
||||
err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
|
||||
if err != nil {
|
||||
_ = sc.removePeer(event.peerID)
|
||||
sc.removePeer(event.peerID)
|
||||
return scPeerError{peerID: event.peerID, reason: err}, nil
|
||||
}
|
||||
|
||||
@@ -532,16 +542,14 @@ func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
|
||||
}
|
||||
|
||||
func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) {
|
||||
if len(sc.peers) == 0 {
|
||||
return noOp, nil
|
||||
}
|
||||
|
||||
// No such peer or peer was removed.
|
||||
peer, ok := sc.peers[event.peerID]
|
||||
if !ok || peer.state == peerStateRemoved {
|
||||
return noOp, nil
|
||||
}
|
||||
|
||||
// The peer may have been just removed due to errors, low speed or timeouts.
|
||||
_ = sc.removePeer(event.peerID)
|
||||
sc.removePeer(event.peerID)
|
||||
|
||||
return scPeerError{peerID: event.peerID,
|
||||
reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d",
|
||||
@@ -550,13 +558,11 @@ func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, erro
|
||||
|
||||
func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) {
|
||||
if event.height != sc.height {
|
||||
panic(fmt.Sprintf("processed height %d but expected height %d", event.height, sc.height))
|
||||
panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height))
|
||||
}
|
||||
|
||||
err := sc.markProcessed(event.height)
|
||||
if err != nil {
|
||||
// It is possible that a peer error or timeout is handled after the processor
|
||||
// has processed the block but before the scheduler received this event,
|
||||
// so when pcBlockProcessed event is received the block had been requested again.
|
||||
return scSchedulerFail{reason: err}, nil
|
||||
}
|
||||
|
||||
@@ -570,13 +576,10 @@ func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error)
|
||||
// Handles an error from the processor. The processor had already cleaned the blocks from
|
||||
// the peers included in this event. Just attempt to remove the peers.
|
||||
func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) {
|
||||
if len(sc.peers) == 0 {
|
||||
return noOp, nil
|
||||
}
|
||||
// The peers may have been just removed due to errors, low speed or timeouts.
|
||||
_ = sc.removePeer(event.firstPeerID)
|
||||
sc.removePeer(event.firstPeerID)
|
||||
if event.firstPeerID != event.secondPeerID {
|
||||
_ = sc.removePeer(event.secondPeerID)
|
||||
sc.removePeer(event.secondPeerID)
|
||||
}
|
||||
|
||||
if sc.allBlocksProcessed() {
|
||||
@@ -592,20 +595,18 @@ func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) {
|
||||
}
|
||||
|
||||
func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) {
|
||||
err := sc.removePeer(event.peerID)
|
||||
if err != nil {
|
||||
// XXX - It is possible that the removePeer fails here for legitimate reasons
|
||||
// for example if a peer timeout or error was handled just before this.
|
||||
return scSchedulerFail{reason: err}, nil
|
||||
}
|
||||
sc.removePeer(event.peerID)
|
||||
|
||||
if sc.allBlocksProcessed() {
|
||||
return scFinishedEv{reason: "removed peer"}, nil
|
||||
}
|
||||
return noOp, nil
|
||||
|
||||
// Return scPeerError so the peer (and all associated blocks) is removed from
|
||||
// the processor.
|
||||
return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil
|
||||
}
|
||||
|
||||
func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
|
||||
|
||||
// Check behavior of peer responsible to deliver block at sc.height.
|
||||
timeHeightAsked, ok := sc.pendingTime[sc.height]
|
||||
if ok && time.Since(timeHeightAsked) > sc.peerTimeout {
|
||||
@@ -613,9 +614,7 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
|
||||
// from that peer within sc.peerTimeout. Remove the peer. This is to ensure that a peer
|
||||
// will be timed out even if it sends blocks at higher heights but prevents progress by
|
||||
// not sending the block at current height.
|
||||
if err := sc.removePeer(sc.pendingBlocks[sc.height]); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sc.removePeer(sc.pendingBlocks[sc.height])
|
||||
}
|
||||
|
||||
prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
|
||||
@@ -623,11 +622,7 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
|
||||
return noOp, nil
|
||||
}
|
||||
for _, peerID := range prunablePeers {
|
||||
err := sc.removePeer(peerID)
|
||||
if err != nil {
|
||||
// Should never happen as prunablePeers() returns only existing peers in Ready state.
|
||||
panic("scheduler data corruption")
|
||||
}
|
||||
sc.removePeer(peerID)
|
||||
}
|
||||
|
||||
// If all blocks are processed we should finish.
|
||||
@@ -636,7 +631,6 @@ func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
|
||||
}
|
||||
|
||||
return scPeersPruned{peers: prunablePeers}, nil
|
||||
|
||||
}
|
||||
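handleTryPrunePeer above makes two passes: it drops the peer that owes the block at sc.height if that request has been pending longer than peerTimeout, and it prunes any ready peer that prunablePeers flags as stale or too slow. The per-peer staleness test is roughly the following; this is a sketch using the scPeer fields shown in this file, not the exact implementation.

// shouldPrune reports whether a ready peer looks stale or too slow: it has
// not been heard from within the timeout, or its measured receive rate is
// below the configured minimum.
func shouldPrune(p *scPeer, timeout time.Duration, minRecvRate int64, now time.Time) bool {
	if p.state != peerStateReady {
		return false
	}
	if now.Sub(p.lastTouched) > timeout {
		return true
	}
	return minRecvRate > 0 && p.lastRate < minRecvRate
}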
|
||||
func (sc *scheduler) handleResetState(event bcResetState) (Event, error) {
|
||||
|
||||
@@ -20,9 +20,9 @@ type scTestParams struct {
|
||||
initHeight int64
|
||||
height int64
|
||||
allB []int64
|
||||
pending map[int64]p2p.ID
|
||||
pending map[int64]p2p.NodeID
|
||||
pendingTime map[int64]time.Time
|
||||
received map[int64]p2p.ID
|
||||
received map[int64]p2p.NodeID
|
||||
peerTimeout time.Duration
|
||||
minRecvRate int64
|
||||
targetPending int
|
||||
@@ -41,7 +41,7 @@ func verifyScheduler(sc *scheduler) {
|
||||
}
|
||||
|
||||
func newTestScheduler(params scTestParams) *scheduler {
|
||||
peers := make(map[p2p.ID]*scPeer)
|
||||
peers := make(map[p2p.NodeID]*scPeer)
|
||||
var maxHeight int64
|
||||
|
||||
initHeight := params.initHeight
|
||||
@@ -54,8 +54,8 @@ func newTestScheduler(params scTestParams) *scheduler {
|
||||
}
|
||||
|
||||
for id, peer := range params.peers {
|
||||
peer.peerID = p2p.ID(id)
|
||||
peers[p2p.ID(id)] = peer
|
||||
peer.peerID = p2p.NodeID(id)
|
||||
peers[p2p.NodeID(id)] = peer
|
||||
if maxHeight < peer.height {
|
||||
maxHeight = peer.height
|
||||
}
|
||||
@@ -122,7 +122,7 @@ func TestScMaxHeights(t *testing.T) {
|
||||
name: "one ready peer",
|
||||
sc: scheduler{
|
||||
height: 3,
|
||||
peers: map[p2p.ID]*scPeer{"P1": {height: 6, state: peerStateReady}},
|
||||
peers: map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
|
||||
},
|
||||
wantMax: 6,
|
||||
},
|
||||
@@ -130,7 +130,7 @@ func TestScMaxHeights(t *testing.T) {
|
||||
name: "ready and removed peers",
|
||||
sc: scheduler{
|
||||
height: 1,
|
||||
peers: map[p2p.ID]*scPeer{
|
||||
peers: map[p2p.NodeID]*scPeer{
|
||||
"P1": {height: 4, state: peerStateReady},
|
||||
"P2": {height: 10, state: peerStateRemoved}},
|
||||
},
|
||||
@@ -140,7 +140,7 @@ func TestScMaxHeights(t *testing.T) {
|
||||
name: "removed peers",
|
||||
sc: scheduler{
|
||||
height: 1,
|
||||
peers: map[p2p.ID]*scPeer{
|
||||
peers: map[p2p.NodeID]*scPeer{
|
||||
"P1": {height: 4, state: peerStateRemoved},
|
||||
"P2": {height: 10, state: peerStateRemoved}},
|
||||
},
|
||||
@@ -150,7 +150,7 @@ func TestScMaxHeights(t *testing.T) {
|
||||
name: "new peers",
|
||||
sc: scheduler{
|
||||
height: 1,
|
||||
peers: map[p2p.ID]*scPeer{
|
||||
peers: map[p2p.NodeID]*scPeer{
|
||||
"P1": {base: -1, height: -1, state: peerStateNew},
|
||||
"P2": {base: -1, height: -1, state: peerStateNew}},
|
||||
},
|
||||
@@ -160,7 +160,7 @@ func TestScMaxHeights(t *testing.T) {
|
||||
name: "mixed peers",
|
||||
sc: scheduler{
|
||||
height: 1,
|
||||
peers: map[p2p.ID]*scPeer{
|
||||
peers: map[p2p.NodeID]*scPeer{
|
||||
"P1": {height: -1, state: peerStateNew},
|
||||
"P2": {height: 10, state: peerStateReady},
|
||||
"P3": {height: 20, state: peerStateRemoved},
|
||||
@@ -187,7 +187,7 @@ func TestScMaxHeights(t *testing.T) {
|
||||
func TestScEnsurePeer(t *testing.T) {
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -244,7 +244,7 @@ func TestScTouchPeer(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
time time.Time
|
||||
}
|
||||
|
||||
@@ -316,13 +316,13 @@ func TestScPrunablePeers(t *testing.T) {
|
||||
name string
|
||||
fields scTestParams
|
||||
args args
|
||||
wantResult []p2p.ID
|
||||
wantResult []p2p.NodeID
|
||||
}{
|
||||
{
|
||||
name: "no peers",
|
||||
fields: scTestParams{peers: map[string]*scPeer{}},
|
||||
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
|
||||
wantResult: []p2p.ID{},
|
||||
wantResult: []p2p.NodeID{},
|
||||
},
|
||||
{
|
||||
name: "mixed peers",
|
||||
@@ -341,7 +341,7 @@ func TestScPrunablePeers(t *testing.T) {
|
||||
"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
|
||||
}},
|
||||
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
|
||||
wantResult: []p2p.ID{"P4", "P5", "P6"},
|
||||
wantResult: []p2p.NodeID{"P4", "P5", "P6"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -361,7 +361,7 @@ func TestScPrunablePeers(t *testing.T) {
|
||||
func TestScRemovePeer(t *testing.T) {
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -418,20 +418,19 @@ func TestScRemovePeer(t *testing.T) {
|
||||
"P1": {height: 10, state: peerStateRemoved},
|
||||
"P2": {height: 11, state: peerStateReady}},
|
||||
allB: []int64{8, 9, 10, 11}},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "remove Ready peer with blocks requested",
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3},
|
||||
pending: map[int64]p2p.ID{1: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1"},
|
||||
},
|
||||
args: args{peerID: "P1"},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
|
||||
allB: []int64{},
|
||||
pending: map[int64]p2p.ID{},
|
||||
pending: map[int64]p2p.NodeID{},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -439,13 +438,13 @@ func TestScRemovePeer(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3},
|
||||
received: map[int64]p2p.ID{1: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1"},
|
||||
},
|
||||
args: args{peerID: "P1"},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
|
||||
allB: []int64{},
|
||||
received: map[int64]p2p.ID{},
|
||||
received: map[int64]p2p.NodeID{},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -453,15 +452,15 @@ func TestScRemovePeer(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
pending: map[int64]p2p.ID{1: "P1", 3: "P1"},
|
||||
received: map[int64]p2p.ID{2: "P1", 4: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 3: "P1"},
|
||||
received: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
|
||||
},
|
||||
args: args{peerID: "P1"},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}},
|
||||
allB: []int64{},
|
||||
pending: map[int64]p2p.ID{},
|
||||
received: map[int64]p2p.ID{},
|
||||
pending: map[int64]p2p.NodeID{},
|
||||
received: map[int64]p2p.NodeID{},
|
||||
},
|
||||
},
|
||||
{
|
||||
@@ -472,8 +471,8 @@ func TestScRemovePeer(t *testing.T) {
|
||||
"P2": {height: 6, state: peerStateReady},
|
||||
},
|
||||
allB: []int64{1, 2, 3, 4, 5, 6},
|
||||
pending: map[int64]p2p.ID{1: "P1", 3: "P2", 6: "P1"},
|
||||
received: map[int64]p2p.ID{2: "P1", 4: "P2", 5: "P2"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"},
|
||||
received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"},
|
||||
},
|
||||
args: args{peerID: "P1"},
|
||||
wantFields: scTestParams{
|
||||
@@ -482,8 +481,8 @@ func TestScRemovePeer(t *testing.T) {
|
||||
"P2": {height: 6, state: peerStateReady},
|
||||
},
|
||||
allB: []int64{1, 2, 3, 4, 5, 6},
|
||||
pending: map[int64]p2p.ID{3: "P2"},
|
||||
received: map[int64]p2p.ID{4: "P2", 5: "P2"},
|
||||
pending: map[int64]p2p.NodeID{3: "P2"},
|
||||
received: map[int64]p2p.NodeID{4: "P2", 5: "P2"},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -492,9 +491,7 @@ func TestScRemovePeer(t *testing.T) {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
sc := newTestScheduler(tt.fields)
|
||||
if err := sc.removePeer(tt.args.peerID); (err != nil) != tt.wantErr {
|
||||
t.Errorf("removePeer() wantErr %v, error = %v", tt.wantErr, err)
|
||||
}
|
||||
sc.removePeer(tt.args.peerID)
|
||||
wantSc := newTestScheduler(tt.wantFields)
|
||||
assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers)
|
||||
})
|
||||
@@ -504,7 +501,7 @@ func TestScRemovePeer(t *testing.T) {
|
||||
func TestScSetPeerRange(t *testing.T) {
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
base int64
|
||||
height int64
|
||||
}
|
||||
@@ -534,7 +531,6 @@ func TestScSetPeerRange(t *testing.T) {
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
|
||||
args: args{peerID: "P1", height: 4},
|
||||
wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
name: "decrease height of single peer",
|
||||
@@ -586,8 +582,7 @@ func TestScSetPeerRange(t *testing.T) {
|
||||
allB: []int64{1, 2, 3, 4}},
|
||||
args: args{peerID: "P1", base: 6, height: 5},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4}},
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
|
||||
wantErr: true,
|
||||
},
|
||||
{
|
||||
@@ -627,25 +622,25 @@ func TestScGetPeersWithHeight(t *testing.T) {
|
||||
name string
|
||||
fields scTestParams
|
||||
args args
|
||||
wantResult []p2p.ID
|
||||
wantResult []p2p.NodeID
|
||||
}{
|
||||
{
|
||||
name: "no peers",
|
||||
fields: scTestParams{peers: map[string]*scPeer{}},
|
||||
args: args{height: 10},
|
||||
wantResult: []p2p.ID{},
|
||||
wantResult: []p2p.NodeID{},
|
||||
},
|
||||
{
|
||||
name: "only new peers",
|
||||
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
|
||||
args: args{height: 10},
|
||||
wantResult: []p2p.ID{},
|
||||
wantResult: []p2p.NodeID{},
|
||||
},
|
||||
{
|
||||
name: "only Removed peers",
|
||||
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
|
||||
args: args{height: 2},
|
||||
wantResult: []p2p.ID{},
|
||||
wantResult: []p2p.NodeID{},
|
||||
},
|
||||
{
|
||||
name: "one Ready shorter peer",
|
||||
@@ -654,7 +649,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
},
|
||||
args: args{height: 5},
|
||||
wantResult: []p2p.ID{},
|
||||
wantResult: []p2p.NodeID{},
|
||||
},
|
||||
{
|
||||
name: "one Ready equal peer",
|
||||
@@ -663,7 +658,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
},
|
||||
args: args{height: 4},
|
||||
wantResult: []p2p.ID{"P1"},
|
||||
wantResult: []p2p.NodeID{"P1"},
|
||||
},
|
||||
{
|
||||
name: "one Ready higher peer",
|
||||
@@ -673,7 +668,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
},
|
||||
args: args{height: 4},
|
||||
wantResult: []p2p.ID{"P1"},
|
||||
wantResult: []p2p.NodeID{"P1"},
|
||||
},
|
||||
{
|
||||
name: "one Ready higher peer at base",
|
||||
@@ -683,7 +678,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
},
|
||||
args: args{height: 4},
|
||||
wantResult: []p2p.ID{"P1"},
|
||||
wantResult: []p2p.NodeID{"P1"},
|
||||
},
|
||||
{
|
||||
name: "one Ready higher peer with higher base",
|
||||
@@ -693,7 +688,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
},
|
||||
args: args{height: 4},
|
||||
wantResult: []p2p.ID{},
|
||||
wantResult: []p2p.NodeID{},
|
||||
},
|
||||
{
|
||||
name: "multiple mixed peers",
|
||||
@@ -708,7 +703,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
|
||||
allB: []int64{8, 9, 10, 11},
|
||||
},
|
||||
args: args{height: 8},
|
||||
wantResult: []p2p.ID{"P2", "P5"},
|
||||
wantResult: []p2p.NodeID{"P2", "P5"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -730,7 +725,7 @@ func TestScMarkPending(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
height int64
|
||||
tm time.Time
|
||||
}
|
||||
@@ -826,14 +821,14 @@ func TestScMarkPending(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{1: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: now},
|
||||
},
|
||||
args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
|
||||
},
|
||||
},
|
||||
@@ -856,7 +851,7 @@ func TestScMarkReceived(t *testing.T) {
|
||||
now := time.Now()
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
peerID p2p.NodeID
|
||||
height int64
|
||||
size int64
|
||||
tm time.Time
|
||||
@@ -896,7 +891,7 @@ func TestScMarkReceived(t *testing.T) {
|
||||
"P2": {height: 4, state: peerStateReady},
|
||||
},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
pending: map[int64]p2p.ID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
|
||||
},
|
||||
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
|
||||
wantFields: scTestParams{
|
||||
@@ -905,7 +900,7 @@ func TestScMarkReceived(t *testing.T) {
|
||||
"P2": {height: 4, state: peerStateReady},
|
||||
},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
pending: map[int64]p2p.ID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
@@ -914,13 +909,13 @@ func TestScMarkReceived(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
pending: map[int64]p2p.ID{},
|
||||
pending: map[int64]p2p.NodeID{},
|
||||
},
|
||||
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
pending: map[int64]p2p.ID{},
|
||||
pending: map[int64]p2p.NodeID{},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
@@ -929,14 +924,14 @@ func TestScMarkReceived(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
|
||||
},
|
||||
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
|
||||
},
|
||||
wantErr: true,
|
||||
@@ -946,16 +941,16 @@ func TestScMarkReceived(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: now, 2: now},
|
||||
},
|
||||
args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{1: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: now},
|
||||
received: map[int64]p2p.ID{2: "P1"},
|
||||
received: map[int64]p2p.NodeID{2: "P1"},
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -993,19 +988,20 @@ func TestScMarkProcessed(t *testing.T) {
|
||||
{
|
||||
name: "processed an unreceived block",
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{2: "P1"},
|
||||
pendingTime: map[int64]time.Time{2: now},
|
||||
received: map[int64]p2p.ID{1: "P1"}},
|
||||
height: 2,
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{2},
|
||||
pending: map[int64]p2p.NodeID{2: "P1"},
|
||||
pendingTime: map[int64]time.Time{2: now},
|
||||
targetPending: 1,
|
||||
},
|
||||
args: args{height: 2},
|
||||
wantFields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{2: "P1"},
|
||||
pendingTime: map[int64]time.Time{2: now},
|
||||
received: map[int64]p2p.ID{1: "P1"}},
|
||||
wantErr: true,
|
||||
height: 3,
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{3},
|
||||
targetPending: 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "mark processed success",
|
||||
@@ -1013,15 +1009,15 @@ func TestScMarkProcessed(t *testing.T) {
|
||||
height: 1,
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{1, 2},
|
||||
pending: map[int64]p2p.ID{2: "P1"},
|
||||
pending: map[int64]p2p.NodeID{2: "P1"},
|
||||
pendingTime: map[int64]time.Time{2: now},
|
||||
received: map[int64]p2p.ID{1: "P1"}},
|
||||
received: map[int64]p2p.NodeID{1: "P1"}},
|
||||
args: args{height: 1},
|
||||
wantFields: scTestParams{
|
||||
height: 2,
|
||||
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
|
||||
allB: []int64{2},
|
||||
pending: map[int64]p2p.ID{2: "P1"},
|
||||
pending: map[int64]p2p.NodeID{2: "P1"},
|
||||
pendingTime: map[int64]time.Time{2: now}},
|
||||
},
|
||||
}
|
||||
@@ -1105,7 +1101,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
|
||||
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
|
||||
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
|
||||
},
|
||||
wantResult: false,
|
||||
@@ -1115,7 +1111,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
|
||||
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
|
||||
},
|
||||
wantResult: false,
|
||||
},
|
||||
@@ -1126,7 +1122,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
|
||||
peers: map[string]*scPeer{
|
||||
"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{4},
|
||||
received: map[int64]p2p.ID{4: "P1"},
|
||||
received: map[int64]p2p.NodeID{4: "P1"},
|
||||
},
|
||||
wantResult: true,
|
||||
},
|
||||
@@ -1135,7 +1131,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
|
||||
fields: scTestParams{
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
pending: map[int64]p2p.ID{2: "P1", 4: "P1"},
|
||||
pending: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
|
||||
pendingTime: map[int64]time.Time{2: now, 4: now},
|
||||
},
|
||||
wantResult: false,
|
||||
@@ -1183,7 +1179,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
|
||||
initHeight: 1,
|
||||
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
|
||||
allB: []int64{1, 2, 3, 4},
|
||||
pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantHeight: -1,
@@ -1194,7 +1190,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantHeight: -1,
},
@@ -1213,7 +1209,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]p2p.ID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
},
wantHeight: 1,
@@ -1243,7 +1239,7 @@ func TestScSelectPeer(t *testing.T) {
name string
fields scTestParams
args args
wantResult p2p.ID
wantResult p2p.NodeID
wantError bool
}{
{
@@ -1311,7 +1307,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P2": {height: 9, state: peerStateReady}},
allB: []int64{4, 5, 6, 7, 8, 9},
pending: map[int64]p2p.ID{
pending: map[int64]p2p.NodeID{
4: "P1", 6: "P1",
5: "P2",
},
@@ -1327,7 +1323,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 15, state: peerStateReady},
"P3": {height: 15, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
pending: map[int64]p2p.ID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -1377,6 +1373,9 @@ func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, even
t.Errorf("error = %v, wantErr %v", err, wantErr)
return
}
if !assert.IsType(t, wantEvent, event) {
t.Log(fmt.Sprintf("Wrong type received, got: %v", event))
}
switch wantEvent := wantEvent.(type) {
case scPeerError:
assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID)
@@ -1393,7 +1392,7 @@ func TestScHandleBlockResponse(t *testing.T) {
now := time.Now()
block6FromP1 := bcBlockResponse{
time: now.Add(time.Millisecond),
peerID: p2p.ID("P1"),
peerID: p2p.NodeID("P1"),
size: 100,
block: makeScBlock(6),
}
@@ -1413,13 +1412,13 @@ func TestScHandleBlockResponse(t *testing.T) {
name: "empty scheduler",
fields: scTestParams{},
args: args{event: block6FromP1},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
wantEvent: noOpEvent{},
},
{
name: "block from removed peer",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}},
args: args{event: block6FromP1},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
wantEvent: noOpEvent{},
},
{
name: "block we haven't asked for",
@@ -1434,18 +1433,18 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.ID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
wantEvent: noOpEvent{},
},
{
name: "block with bad timestamp",
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.ID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
},
args: args{event: block6FromP1},
@@ -1456,11 +1455,11 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.ID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(6)},
wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block},
},
}

@@ -1478,7 +1477,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
now := time.Now()
noBlock6FromP1 := bcNoBlockResponse{
time: now.Add(time.Millisecond),
peerID: p2p.ID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}

@@ -1514,14 +1513,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.ID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: noOpEvent{},
wantFields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.ID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
},
@@ -1530,7 +1529,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.ID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
@@ -1553,7 +1552,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
func TestScHandleBlockProcessed(t *testing.T) {
now := time.Now()
processed6FromP1 := pcBlockProcessed{
peerID: p2p.ID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}

@@ -1572,7 +1571,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
name: "empty scheduler",
fields: scTestParams{height: 6},
args: args{event: processed6FromP1},
wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")},
wantEvent: noOpEvent{},
},
{
name: "processed block we don't have",
@@ -1580,11 +1579,11 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.ID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: processed6FromP1},
wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")},
wantEvent: noOpEvent{},
},
{
name: "processed block ok, we processed all blocks",
@@ -1592,7 +1591,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]p2p.ID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: scFinishedEv{},
@@ -1603,8 +1602,8 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.ID{7: "P1", 8: "P1"},
received: map[int64]p2p.ID{6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{6: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: noOpEvent{},
@@ -1647,7 +1646,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.ID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1659,7 +1658,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]p2p.ID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1671,7 +1670,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]p2p.ID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: scFinishedEv{},
@@ -1682,8 +1681,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 5,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{5, 6, 7, 8},
pending: map[int64]p2p.ID{7: "P1", 8: "P1"},
received: map[int64]p2p.ID{5: "P1", 6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: noOpEvent{},
@@ -1698,8 +1697,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
"P3": {height: 8, state: peerStateReady},
},
allB: []int64{5, 6, 7, 8},
pending: map[int64]p2p.ID{7: "P1", 8: "P1"},
received: map[int64]p2p.ID{5: "P1", 6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
wantEvent: noOpEvent{},
@@ -1718,7 +1717,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {

func TestScHandleAddNewPeer(t *testing.T) {
addP1 := bcAddNewPeer{
peerID: p2p.ID("P1"),
peerID: p2p.NodeID("P1"),
}
type args struct {
event bcAddNewPeer
@@ -1829,7 +1828,7 @@ func TestScHandleTryPrunePeer(t *testing.T) {
allB: []int64{1, 2, 3, 4, 5, 6, 7},
peerTimeout: time.Second},
args: args{event: pruneEv},
wantEvent: scPeersPruned{peers: []p2p.ID{"P4", "P5", "P6"}},
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}},
},
{
name: "mixed peers, finish after pruning",
@@ -1927,7 +1926,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 4, state: peerStateReady},
"P2": {height: 5, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5},
pending: map[int64]p2p.ID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P2",
},
@@ -1945,7 +1944,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P3": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]p2p.ID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -1998,7 +1997,7 @@ func TestScHandleStatusResponse(t *testing.T) {
name: "increase height of removed peer",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
args: args{event: statusRespP1Ev},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
wantEvent: noOpEvent{},
},

{
@@ -2047,6 +2046,8 @@ func TestScHandle(t *testing.T) {
priorityNormal
}

block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3)

t0 := time.Now()
tick := make([]time.Time, 100)
for i := range tick {
@@ -2105,7 +2106,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.ID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: tick[1]},
height: 1,
},
@@ -2117,7 +2118,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.ID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
height: 1,
},
@@ -2129,67 +2130,67 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
height: 1,
},
},
{ // block response 1
args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: makeScBlock(1)}},
wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(1)},
args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}},
wantEvent: scBlockReceived{peerID: "P1", block: block1},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.ID{2: "P1", 3: "P1"},
pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
received: map[int64]p2p.ID{1: "P1"},
received: map[int64]p2p.NodeID{1: "P1"},
height: 1,
},
},
{ // block response 2
args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: makeScBlock(2)}},
wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(2)},
args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}},
wantEvent: scBlockReceived{peerID: "P1", block: block2},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
allB: []int64{1, 2, 3},
pending: map[int64]p2p.ID{3: "P1"},
pending: map[int64]p2p.NodeID{3: "P1"},
pendingTime: map[int64]time.Time{3: tick[3]},
received: map[int64]p2p.ID{1: "P1", 2: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
height: 1,
},
},
{ // block response 3
args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: makeScBlock(3)}},
wantEvent: scBlockReceived{peerID: "P1", block: makeScBlock(3)},
args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}},
wantEvent: scBlockReceived{peerID: "P1", block: block3},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
},
{ // processed block 1
args: args{event: pcBlockProcessed{peerID: p2p.ID("P1"), height: 1}},
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}},
wantEvent: noOpEvent{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{2, 3},
received: map[int64]p2p.ID{2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
height: 2,
},
},
{ // processed block 2
args: args{event: pcBlockProcessed{peerID: p2p.ID("P1"), height: 2}},
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}},
wantEvent: scFinishedEv{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{3},
received: map[int64]p2p.ID{3: "P1"},
received: map[int64]p2p.NodeID{3: "P1"},
height: 3,
},
},
@@ -2205,7 +2206,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3, 4},
received: map[int64]p2p.ID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -2216,7 +2217,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]p2p.ID{},
received: map[int64]p2p.NodeID{},
height: 1,
},
},
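The scheduler changes above are mechanical (p2p.ID becomes p2p.NodeID, and the repeated makeScBlock calls are hoisted into block1, block2, block3), but the table-driven layout these tests use is worth seeing in isolation. Below is a minimal, self-contained sketch of that pattern; the NodeID type and pendingFor helper are stand-ins invented for illustration, not the scheduler's real types.

```go
package scheduler // illustrative sketch, not the real blockchain scheduler package

import "testing"

// NodeID stands in for p2p.NodeID: a string-based peer identifier.
type NodeID string

// pendingFor mimics the shape of the scheduler's pending map
// (block height -> peer the block was requested from).
func pendingFor(pending map[int64]NodeID, height int64) (NodeID, bool) {
	id, ok := pending[height]
	return id, ok
}

func TestPendingLookup(t *testing.T) {
	cases := []struct {
		name    string
		pending map[int64]NodeID
		height  int64
		want    NodeID
		wantOK  bool
	}{
		{name: "requested height", pending: map[int64]NodeID{6: "P1"}, height: 6, want: "P1", wantOK: true},
		{name: "unknown height", pending: map[int64]NodeID{6: "P1"}, height: 7, want: "", wantOK: false},
	}
	for _, tc := range cases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			got, ok := pendingFor(tc.pending, tc.height)
			if got != tc.want || ok != tc.wantOK {
				t.Errorf("got (%q, %v), want (%q, %v)", got, ok, tc.want, tc.wantOK)
			}
		})
	}
}
```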
buf.gen.yaml (new file, 13 additions)
@@ -0,0 +1,13 @@
# The version of the generation template.
# Required.
# The only currently-valid value is v1beta1.
version: v1beta1

# The plugins to run.
plugins:
# The name of the plugin.
- name: gogofaster
# The relative output directory.
out: proto
# Any options to provide to the plugin.
opt: Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,Mgoogle/protobuf/duration.proto=github.com/golang/protobuf/ptypes/duration,plugins=grpc,paths=source_relative
@@ -1,24 +1,50 @@
package main

import (
"context"
"crypto/tls"
"crypto/x509"
"flag"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"time"

"github.com/tendermint/tendermint/crypto/ed25519"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"

"github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
tmos "github.com/tendermint/tendermint/libs/os"

"github.com/tendermint/tendermint/privval"
grpcprivval "github.com/tendermint/tendermint/privval/grpc"
privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
)

var (
// Create a metrics registry.
reg = prometheus.NewRegistry()

// Create some standard server metrics.
grpcMetrics = grpc_prometheus.NewServerMetrics()
)

func main() {
var (
addr = flag.String("addr", ":26659", "Address of client to connect to")
addr = flag.String("addr", "127.0.0.1:26659", "Address to listen on (host:port)")
chainID = flag.String("chain-id", "mychain", "chain id")
privValKeyPath = flag.String("priv-key", "", "priv val key file path")
privValStatePath = flag.String("priv-state", "", "priv val state file path")
insecure = flag.Bool("insecure", false, "allow server to run insecurely (no TLS)")
certFile = flag.String("certfile", "", "absolute path to server certificate")
keyFile = flag.String("keyfile", "", "absolute path to server key")
rootCA = flag.String("rootcafile", "", "absolute path to root CA")
prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")

logger = log.NewTMLogger(
log.NewSyncWriter(os.Stdout),
@@ -32,39 +58,106 @@ func main() {
"chainID", *chainID,
"privKeyPath", *privValKeyPath,
"privStatePath", *privValStatePath,
"insecure", *insecure,
"certFile", *certFile,
"keyFile", *keyFile,
"rootCA", *rootCA,
)

pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)

var dialer privval.SocketDialer
opts := []grpc.ServerOption{}
if !*insecure {
certificate, err := tls.LoadX509KeyPair(*certFile, *keyFile)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to load X509 key pair: %v", err)
os.Exit(1)
}

certPool := x509.NewCertPool()
bs, err := ioutil.ReadFile(*rootCA)
if err != nil {
fmt.Fprintf(os.Stderr, "failed to read client ca cert: %s", err)
os.Exit(1)
}

if ok := certPool.AppendCertsFromPEM(bs); !ok {
fmt.Fprintf(os.Stderr, "failed to append client certs")
os.Exit(1)
}

tlsConfig := &tls.Config{
ClientAuth: tls.RequireAndVerifyClientCert,
Certificates: []tls.Certificate{certificate},
ClientCAs: certPool,
MinVersion: tls.VersionTLS13,
}

creds := grpc.Creds(credentials.NewTLS(tlsConfig))
opts = append(opts, creds)
logger.Info("SignerServer: Creating security credentials")
} else {
logger.Info("SignerServer: You are using an insecure gRPC connection!")
}

// add prometheus metrics for unary RPC calls
opts = append(opts, grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor))

ss := grpcprivval.NewSignerServer(*chainID, pv, logger)

protocol, address := tmnet.ProtocolAndAddress(*addr)
switch protocol {
case "unix":
dialer = privval.DialUnixFn(address)
case "tcp":
connTimeout := 3 * time.Second // TODO
dialer = privval.DialTCPFn(address, connTimeout, ed25519.GenPrivKey())
default:
logger.Error("Unknown protocol", "protocol", protocol)

lis, err := net.Listen(protocol, address)
if err != nil {
fmt.Fprintf(os.Stderr, "SignerServer: Failed to listen %v", err)
os.Exit(1)
}

sd := privval.NewSignerDialerEndpoint(logger, dialer)
ss := privval.NewSignerServer(sd, *chainID, pv)
s := grpc.NewServer(opts...)

err := ss.Start()
if err != nil {
panic(err)
privvalproto.RegisterPrivValidatorAPIServer(s, ss)

var httpSrv *http.Server
if *prometheusAddr != "" {
httpSrv = registerPrometheus(*prometheusAddr, s)
}

logger.Info("SignerServer: Starting grpc server")
if err := s.Serve(lis); err != nil {
fmt.Fprintf(os.Stderr, "Unable to listen on port %s: %v", *addr, err)
os.Exit(1)
}

// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
err := ss.Stop()
if err != nil {
panic(err)
logger.Debug("SignerServer: calling Close")
if *prometheusAddr != "" {
ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
defer cancel()
if err := httpSrv.Shutdown(ctx); err != nil {
fmt.Fprintf(os.Stderr, "Unable to stop http server: %v", err)
os.Exit(1)
}
}
s.GracefulStop()
})

// Run forever.
select {}
}

func registerPrometheus(addr string, s *grpc.Server) *http.Server {
// Initialize all metrics.
grpcMetrics.InitializeMetrics(s)
// create http server to serve prometheus
httpServer := &http.Server{Handler: promhttp.HandlerFor(reg, promhttp.HandlerOpts{}), Addr: addr}

go func() {
if err := httpServer.ListenAndServe(); err != nil {
fmt.Fprintf(os.Stderr, "Unable to start a http server: %v", err)
os.Exit(1)
}
}()

return httpServer
}
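Since the reworked server requires and verifies client certificates (tls.RequireAndVerifyClientCert) and pins TLS 1.3, any caller has to present its own key pair and trust the CA that signed the server certificate. The sketch below shows a minimal dial under those assumptions; the certificate file names are hypothetical, and the generated NewPrivValidatorAPIClient constructor mentioned in the final comment is an assumption about the protobuf codegen, not something shown in this diff.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"io/ioutil"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
)

func main() {
	// Client certificate and key: the server authenticates clients, so these are mandatory.
	cert, err := tls.LoadX509KeyPair("client.crt", "client.key") // hypothetical paths
	if err != nil {
		log.Fatalf("load client key pair: %v", err)
	}

	// CA that signed the server certificate.
	caPEM, err := ioutil.ReadFile("ca.crt") // hypothetical path
	if err != nil {
		log.Fatalf("read CA cert: %v", err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(caPEM) {
		log.Fatal("failed to append CA cert")
	}

	creds := credentials.NewTLS(&tls.Config{
		Certificates: []tls.Certificate{cert},
		RootCAs:      pool,
		MinVersion:   tls.VersionTLS13,
	})

	// Same default address the server listens on.
	conn, err := grpc.Dial("127.0.0.1:26659", grpc.WithTransportCredentials(creds))
	if err != nil {
		log.Fatalf("dial signer server: %v", err)
	}
	defer conn.Close()

	// From here one would build the generated client, e.g.
	// privvalproto.NewPrivValidatorAPIClient(conn) (assumed generated name),
	// and issue GetPubKey / SignVote / SignProposal requests.
}
```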
@@ -33,7 +33,7 @@ func init() {
&nodeRPCAddr,
flagNodeRPCAddr,
"tcp://localhost:26657",
"The Tendermint node's RPC address (<host>:<port>)",
"the Tendermint node's RPC address (<host>:<port>)",
)

DebugCmd.AddCommand(killCmd)

@@ -32,14 +32,14 @@ func init() {
&frequency,
flagFrequency,
30,
"The frequency (seconds) in which to poll, aggregate and dump Tendermint debug data",
"the frequency (seconds) in which to poll, aggregate and dump Tendermint debug data",
)

dumpCmd.Flags().StringVar(
&profAddr,
flagProfAddr,
"",
"The profiling server address (<host>:<port>)",
"the profiling server address (<host>:<port>)",
)
}
@@ -5,28 +5,30 @@ import (

"github.com/spf13/cobra"

tmos "github.com/tendermint/tendermint/libs/os"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/p2p"
)

// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to
// the standard output.
// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
// NodeKey to the standard output.
var GenNodeKeyCmd = &cobra.Command{
Use: "gen_node_key",
Short: "Generate a node key for this node and print its ID",
RunE: genNodeKey,
Use: "gen-node-key",
Aliases: []string{"gen_node_key"},
Short: "Generate a new node key",
RunE: genNodeKey,
PreRun: deprecateSnakeCase,
}

func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKeyFile := config.NodeKeyFile()
if tmos.FileExists(nodeKeyFile) {
return fmt.Errorf("node key at %s already exists", nodeKeyFile)
nodeKey := p2p.GenNodeKey()

bz, err := tmjson.Marshal(nodeKey)
if err != nil {
return fmt.Errorf("nodeKey -> json: %w", err)
}

nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile)
if err != nil {
return err
}
fmt.Println(nodeKey.ID())
fmt.Printf(`%v
`, string(bz))

return nil
}
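The reworked command prints the whole NodeKey as JSON rather than only the node ID. Here is a standalone sketch of the same flow, limited to the calls that appear in the diff (p2p.GenNodeKey and tmjson.Marshal); treat it as illustrative rather than the command's exact code.

```go
package main

import (
	"fmt"
	"os"

	tmjson "github.com/tendermint/tendermint/libs/json"
	"github.com/tendermint/tendermint/p2p"
)

func main() {
	// Generate an ephemeral node key; the CLI persists one under the node's config directory instead.
	nodeKey := p2p.GenNodeKey()

	// Marshal with Tendermint's JSON helper so key types round-trip the same way the command does.
	bz, err := tmjson.Marshal(nodeKey)
	if err != nil {
		fmt.Fprintf(os.Stderr, "nodeKey -> json: %v\n", err)
		os.Exit(1)
	}

	fmt.Println(string(bz))
}
```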
@@ -7,22 +7,37 @@ import (

tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)

// GenValidatorCmd allows the generation of a keypair for a
// validator.
var GenValidatorCmd = &cobra.Command{
Use: "gen_validator",
Short: "Generate new validator keypair",
Run: genValidator,
Use: "gen-validator",
Aliases: []string{"gen_validator"},
Short: "Generate new validator keypair",
RunE: genValidator,
PreRun: deprecateSnakeCase,
}

func genValidator(cmd *cobra.Command, args []string) {
pv := privval.GenFilePV("", "")
func init() {
GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}

func genValidator(cmd *cobra.Command, args []string) error {
pv, err := privval.GenFilePV("", "", keyType)
if err != nil {
return err
}

jsbz, err := tmjson.Marshal(pv)
if err != nil {
panic(err)
return fmt.Errorf("validator -> json: %w", err)
}

fmt.Printf(`%v
`, string(jsbz))

return nil
}
@@ -10,6 +10,7 @@ import (
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
@@ -21,6 +22,15 @@ var InitFilesCmd = &cobra.Command{
RunE: initFiles,
}

var (
keyType string
)

func init() {
InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}

func initFiles(cmd *cobra.Command, args []string) error {
return initFilesWithConfig(config)
}
@@ -29,13 +39,19 @@ func initFilesWithConfig(config *cfg.Config) error {
// private validator
privValKeyFile := config.PrivValidatorKeyFile()
privValStateFile := config.PrivValidatorStateFile()
var pv *privval.FilePV
var (
pv *privval.FilePV
err error
)
if tmos.FileExists(privValKeyFile) {
pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
logger.Info("Found private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
pv = privval.GenFilePV(privValKeyFile, privValStateFile)
pv, err = privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
if err != nil {
return err
}
pv.Save()
logger.Info("Generated private validator", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
@@ -56,11 +72,17 @@ func initFilesWithConfig(config *cfg.Config) error {
if tmos.FileExists(genFile) {
logger.Info("Found genesis file", "path", genFile)
} else {

genDoc := types.GenesisDoc{
ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)),
GenesisTime: tmtime.Now(),
ConsensusParams: types.DefaultConsensusParams(),
}
if keyType == "secp256k1" {
genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
}
}
pubKey, err := pv.GetPubKey()
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
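The init changes thread the new --key flag through both privval generation and the genesis document: when secp256k1 is chosen, the genesis consensus params restrict validator public key types to that curve. Below is a hedged sketch of just the genesis adjustment, using only identifiers that appear above; the chain ID is a placeholder.

```go
package main

import (
	"fmt"

	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
	"github.com/tendermint/tendermint/types"
)

// genesisForKeyType mirrors the logic above: start from default consensus
// params and narrow the allowed validator key types when secp256k1 is requested.
func genesisForKeyType(chainID, keyType string) types.GenesisDoc {
	genDoc := types.GenesisDoc{
		ChainID:         chainID,
		ConsensusParams: types.DefaultConsensusParams(),
	}
	if keyType == "secp256k1" {
		genDoc.ConsensusParams.Validator = tmproto.ValidatorParams{
			PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
		}
	}
	return genDoc
}

func main() {
	doc := genesisForKeyType("test-chain", "secp256k1") // hypothetical chain ID
	fmt.Println(doc.ConsensusParams.Validator.PubKeyTypes)
}
```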
@@ -1,11 +1,14 @@
package commands

import (
"bufio"
"context"
"errors"
"fmt"
"net/http"
"os"
"path/filepath"
"regexp"
"strings"
"time"

@@ -13,6 +16,7 @@ import (

dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmos "github.com/tendermint/tendermint/libs/os"
@@ -39,6 +43,12 @@ need a primary RPC address, a trusted hash and height and witness RPC addresses
(if not using sequential verification). To restart the node, thereafter
only the chainID is required.

When /abci_query is called, the Merkle key path format is:

/{store name}/{key}

Please verify with your application that this Merkle key format is used (true
for applications built w/ Cosmos SDK).
`,
RunE: runProxy,
Args: cobra.ExactArgs(1),
@@ -51,7 +61,7 @@ var (
primaryAddr string
witnessAddrsJoined string
chainID string
home string
dir string
maxOpenConnections int

sequential bool
@@ -68,27 +78,28 @@ var (

func init() {
LightCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888",
"Serve the proxy on the given address")
"serve the proxy on the given address")
LightCmd.Flags().StringVarP(&primaryAddr, "primary", "p", "",
"Connect to a Tendermint node at this address")
"connect to a Tendermint node at this address")
LightCmd.Flags().StringVarP(&witnessAddrsJoined, "witnesses", "w", "",
"Tendermint nodes to cross-check the primary node, comma-separated")
LightCmd.Flags().StringVar(&home, "home-dir", ".tendermint-light", "Specify the home directory")
"tendermint nodes to cross-check the primary node, comma-separated")
LightCmd.Flags().StringVarP(&dir, "dir", "d", os.ExpandEnv(filepath.Join("$HOME", ".tendermint-light")),
"specify the directory")
LightCmd.Flags().IntVar(
&maxOpenConnections,
"max-open-connections",
900,
"Maximum number of simultaneous connections (including WebSocket).")
"maximum number of simultaneous connections (including WebSocket).")
LightCmd.Flags().DurationVar(&trustingPeriod, "trusting-period", 168*time.Hour,
"Trusting period that headers can be verified within. Should be significantly less than the unbonding period")
"trusting period that headers can be verified within. Should be significantly less than the unbonding period")
LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3",
"Trust level. Must be between 1/3 and 3/3",
"trust level. Must be between 1/3 and 3/3",
)
LightCmd.Flags().BoolVar(&sequential, "sequential", false,
"Sequential Verification. Verify all headers sequentially as opposed to using skipping verification",
"sequential verification. Verify all headers sequentially as opposed to using skipping verification",
)
}

@@ -111,10 +122,12 @@ func runProxy(cmd *cobra.Command, args []string) error {
witnessesAddrs = strings.Split(witnessAddrsJoined, ",")
}

db, err := dbm.NewGoLevelDB("light-client-db", home)
lightDB, err := dbm.NewGoLevelDB("light-client-db", dir)
if err != nil {
return fmt.Errorf("can't create a db: %w", err)
}
// create a prefixed db on the chainID
db := dbm.NewPrefixDB(lightDB, []byte(chainID))

if primaryAddr == "" { // check to see if we can start from an existing state
var err error
@@ -138,7 +151,25 @@ func runProxy(cmd *cobra.Command, args []string) error {
return fmt.Errorf("can't parse trust level: %w", err)
}

options := []light.Option{light.Logger(logger)}
options := []light.Option{
light.Logger(logger),
light.ConfirmationFunction(func(action string) bool {
fmt.Println(action)
scanner := bufio.NewScanner(os.Stdin)
for {
scanner.Scan()
response := scanner.Text()
switch response {
case "y", "Y":
return true
case "n", "N":
return false
default:
fmt.Println("please input 'Y' or 'n' and press ENTER")
}
}
}),
}

if sequential {
options = append(options, light.SequentialVerification())
@@ -158,7 +189,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
},
primaryAddr,
witnessesAddrs,
dbs.New(db, chainID),
dbs.New(db),
options...,
)
} else { // continue from latest state
@@ -167,7 +198,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
trustingPeriod,
primaryAddr,
witnessesAddrs,
dbs.New(db, chainID),
dbs.New(db),
options...,
)
}
@@ -194,7 +225,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
p := lproxy.Proxy{
Addr: listenAddr,
Config: cfg,
Client: lrpc.NewClient(rpcClient, c),
Client: lrpc.NewClient(rpcClient, c, lrpc.KeyPathFn(defaultMerkleKeyPathFn())),
Logger: logger,
}
// Stop upon receiving SIGTERM or CTRL-C.
@@ -235,3 +266,21 @@ func saveProviders(db dbm.DB, primaryAddr, witnessesAddrs string) error {
}
return nil
}

func defaultMerkleKeyPathFn() lrpc.KeyPathFunc {
// regexp for extracting store name from /abci_query path
storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`)

return func(path string, key []byte) (merkle.KeyPath, error) {
matches := storeNameRegexp.FindStringSubmatch(path)
if len(matches) != 2 {
return nil, fmt.Errorf("can't find store name in %s using %s", path, storeNameRegexp)
}
storeName := matches[1]

kp := merkle.KeyPath{}
kp = kp.AppendKey([]byte(storeName), merkle.KeyEncodingURL)
kp = kp.AppendKey(key, merkle.KeyEncodingURL)
return kp, nil
}
}
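defaultMerkleKeyPathFn converts an /abci_query path of the form /store/{name}/key, plus the raw key bytes, into a two-element Merkle key path that the light client proxy can verify proofs against. The following standalone sketch performs the same transformation with only the calls visible above; the store name and key value are hypothetical.

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/tendermint/tendermint/crypto/merkle"
)

func main() {
	// Same pattern the proxy uses to pull the store name out of the query path.
	storeNameRegexp := regexp.MustCompile(`\/store\/(.+)\/key`)

	path := "/store/bank/key"       // hypothetical Cosmos SDK store
	key := []byte("balances/addr1") // hypothetical key bytes

	matches := storeNameRegexp.FindStringSubmatch(path)
	if len(matches) != 2 {
		fmt.Printf("can't find store name in %s\n", path)
		return
	}

	// Key path: URL-encoded store name first, then the URL-encoded key itself.
	kp := merkle.KeyPath{}
	kp = kp.AppendKey([]byte(matches[1]), merkle.KeyEncodingURL)
	kp = kp.AppendKey(key, merkle.KeyEncodingURL)

	fmt.Println(kp)
}
```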
@@ -11,9 +11,11 @@ import (

// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
var ProbeUpnpCmd = &cobra.Command{
Use: "probe_upnp",
Short: "Test UPnP functionality",
RunE: probeUpnp,
Use: "probe-upnp",
Aliases: []string{"probe_upnp"},
Short: "Test UPnP functionality",
RunE: probeUpnp,
PreRun: deprecateSnakeCase,
}

func probeUpnp(cmd *cobra.Command, args []string) error {
@@ -18,9 +18,11 @@ var ReplayCmd = &cobra.Command{
// ReplayConsoleCmd allows replaying of messages from the WAL in a
// console.
var ReplayConsoleCmd = &cobra.Command{
Use: "replay_console",
Short: "Replay messages from WAL in a console",
Use: "replay-console",
Aliases: []string{"replay_console"},
Short: "Replay messages from WAL in a console",
Run: func(cmd *cobra.Command, args []string) {
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
},
PreRun: deprecateSnakeCase,
}
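Several of the renamed commands in this batch (gen-node-key, gen-validator, probe-upnp, replay-console, unsafe-reset-all below) keep their snake_case spellings as aliases and attach a deprecateSnakeCase PreRun hook. The hook itself is not part of this diff; the sketch below shows one plausible shape for it, purely as an illustration of the aliasing pattern, and may differ from the repository's actual implementation.

```go
package commands

import (
	"fmt"
	"os"
	"strings"

	"github.com/spf13/cobra"
)

// deprecateSnakeCase is a sketch of the PreRun hook referenced above: when a
// command is invoked through its legacy snake_case alias, warn that the
// hyphenated name is the supported spelling.
func deprecateSnakeCase(cmd *cobra.Command, args []string) {
	if strings.Contains(cmd.CalledAs(), "_") {
		fmt.Fprintln(os.Stderr, "deprecated: snake_case commands will be removed; use the hyphenated form instead")
	}
}
```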
@@ -8,27 +8,34 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)

// ResetAllCmd removes the database of this Tendermint core
// instance.
var ResetAllCmd = &cobra.Command{
Use: "unsafe_reset_all",
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
Run: resetAll,
Use: "unsafe-reset-all",
Aliases: []string{"unsafe_reset_all"},
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
Run: resetAll,
PreRun: deprecateSnakeCase,
}

var keepAddrBook bool

func init() {
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "Keep the address book intact")
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact")
ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}

// ResetPrivValidatorCmd resets the private validator files.
var ResetPrivValidatorCmd = &cobra.Command{
Use: "unsafe_reset_priv_validator",
Short: "(unsafe) Reset this node's validator to genesis state",
Run: resetPrivValidator,
Use: "unsafe-reset-priv-validator",
Aliases: []string{"unsafe_reset_priv_validator"},
Short: "(unsafe) Reset this node's validator to genesis state",
Run: resetPrivValidator,
PreRun: deprecateSnakeCase,
}

// XXX: this is totally unsafe.
@@ -71,7 +78,10 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
} else {
pv := privval.GenFilePV(privValKeyFile, privValStateFile)
pv, err := privval.GenFilePV(privValKeyFile, privValStateFile, keyType)
if err != nil {
panic(err)
}
pv.Save()
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
"stateFile", privValStateFile)
Some files were not shown because too many files have changed in this diff.