Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-12 15:52:50 +00:00)

Compare commits: marko/brin ... v0.32.x (83 commits)
Commit SHAs:

53a9ba3233
be85434876
6a07d08f49
f5b25e020b
bf873365e7
925b2948f0
5359e9fed3
37010ad5b2
a26551985f
f80a1bc42b
5ba30e6f55
61ab6718e9
8706d45f3d
eebf6f353d
f9e3092feb
9da308fd2a
f11eae0761
7e2870af49
f132310a4d
a0af03c3e2
d85e2e52d2
eab4d6d82b
cf114c98d4
49d8c0b174
63c9384c8c
0a1ef4aa0f
ffb0278d95
e6a7757bb4
51ccaf6fb8
be771f225a
aad59f2a9a
d494952c82
14e04f7606
83f1801625
95530285d9
7b67ee408b
7ffd3fff43
c013501f45
7ec2dff6fd
b5cad43b26
714948505b
564d6a203a
c38dbdb640
c207fa6eff
470a23f9b4
c4ba93a1e6
88946fd6d8
ab62fd977f
0f111b3c5c
b08f655024
d6ea1ed96f
d06286916d
0354ea87f7
ff2308b5f4
0ad70fb69a
86a581f28f
76f3db06b8
15878dc80c
aacc71dc29
d56fb6ed22
8025d402e2
513a32a6e3
5c9d6d839e
53fdcfd7e9
5f6617db7a
6d4f18aa8c
55066ceaad
58c3e590b4
ff9e08a32f
0335add437
5d0e7034e8
abc30821f4
e89991c445
df6df61ea9
5ed39fd0b3
17b69d4d56
5d7e22a53c
073cd1125e
7041001fb6
c264db339e
8da43508f8
9867a65de7
9df117748e
.circleci/codecov.sh (1550 lines changed; file diff suppressed because it is too large)
The following 445-line CI pipeline definition (CircleCI syntax) was removed:

@@ -1,445 +0,0 @@
```yaml
version: 2.1

executors:
  golang:
    docker:
      - image: tendermintdev/docker-tendermint-build
    working_directory: /go/src/github.com/tendermint/tendermint
    environment:
      GOBIN: /tmp/bin
  release:
    machine: true
  docs:
    docker:
      - image: tendermintdev/docker-website-deployment
    environment:
      AWS_REGION: us-east-1

commands:
  run_test:
    parameters:
      script_path:
        type: string
    steps:
      - attach_workspace:
          at: /tmp/bin
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - restore_cache:
          name: "Restore go modules cache"
          keys:
            - go-mod-v1-{{ checksum "go.sum" }}
      - run:
          name: "Running test"
          command: |
            bash << parameters.script_path >>

jobs:
  setup_dependencies:
    executor: golang
    steps:
      - checkout
      - restore_cache:
          name: "Restore go modules cache"
          keys:
            - go-mod-v1-{{ checksum "go.sum" }}
      - run:
          command: |
            mkdir -p /tmp/bin
      - run:
          name: Cache go modules
          command: make go-mod-cache
      - run:
          name: tools
          command: make tools
      - run:
          name: "Build binaries"
          command: make install install_abci
      - save_cache:
          name: "Save go modules cache"
          key: go-mod-v1-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"
      - save_cache:
          name: "Save source code cache"
          key: go-src-v1-{{ .Revision }}
          paths:
            - ".git"
      - persist_to_workspace:
          root: "/tmp/bin"
          paths:
            - "."

  test_abci_apps:
    executor: golang
    steps:
      - run_test:
          script_path: abci/tests/test_app/test.sh

  # if this test fails, fix it and update the docs at:
  # https://github.com/tendermint/tendermint/blob/master/docs/abci-cli.md
  test_abci_cli:
    executor: golang
    steps:
      - run_test:
          script_path: abci/tests/test_cli/test.sh

  test_apps:
    executor: golang
    steps:
      - run_test:
          script_path: test/app/test.sh

  test_persistence:
    executor: golang
    steps:
      - run_test:
          script_path: test/persist/test_failure_indices.sh

  test_cover:
    executor: golang
    parallelism: 4
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - restore_cache:
          name: "Restore go module cache"
          keys:
            - go-mod-v2-{{ checksum "go.sum" }}
      - run:
          name: "Run tests"
          command: |
            export VERSION="$(git describe --tags --long | sed 's/v\(.*\)/\1/')"
            export GO111MODULE=on
            mkdir -p /tmp/logs /tmp/workspace/profiles
            for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
              id=$(basename "$pkg")
              go test -v -timeout 5m -mod=readonly -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
            done
      - persist_to_workspace:
          root: /tmp/workspace
          paths:
            - "profiles/*"
      - store_artifacts:
          path: /tmp/logs

  localnet:
    working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
    machine:
      image: circleci/classic:latest
    environment:
      GOBIN: /home/circleci/.go_workspace/bin
      GOPATH: /home/circleci/.go_workspace/
      GOOS: linux
      GOARCH: amd64
    parallelism: 1
    steps:
      - checkout
      - run:
          name: run localnet and exit on failure
          command: |
            set -x
            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
            make localnet-start &
            ./scripts/localnet-blocks-test.sh 40 5 10 localhost

  test_p2p:
    environment:
      GOBIN: /home/circleci/.go_workspace/bin
      GOPATH: /home/circleci/.go_workspace
    machine:
      image: circleci/classic:latest
    steps:
      - checkout
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint
      - run: bash test/p2p/circleci.sh
      - store_artifacts:
          path: /home/circleci/project/test/p2p/logs

  upload_coverage:
    executor: golang
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - restore_cache:
          name: "Restore go module cache"
          keys:
            - go-mod-v2-{{ checksum "go.sum" }}
      - run:
          name: gather
          command: |
            echo "mode: atomic" > coverage.txt
            for prof in $(ls /tmp/workspace/profiles/); do
              tail -n +2 /tmp/workspace/profiles/"$prof" >> coverage.txt
            done
      - run:
          name: upload
          command: bash .circleci/codecov.sh -f coverage.txt

  deploy_docs:
    executor: docs
    steps:
      - checkout
      - run:
          name: "Build docs"
          command: make build-docs
      - run:
          name: "Sync to S3"
          command: make sync-docs

  prepare_build:
    executor: golang
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - run:
          name: Get next release number
          command: |
            export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
            echo "Last tag: ${LAST_TAG}"
            if [ -z "${LAST_TAG}" ]; then
              export LAST_TAG="${CIRCLE_BRANCH}"
              echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
            fi
            export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
            echo "Next tag: ${NEXT_TAG}"
            echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
      - run:
          name: Build dependencies
          command: make tools
      - persist_to_workspace:
          root: .
          paths:
            - "release-version.source"
      - save_cache:
          key: v2-release-deps-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"

  build_artifacts:
    executor: golang
    parallelism: 4
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - restore_cache:
          name: "Restore release dependencies cache"
          keys:
            - v2-release-deps-{{ checksum "go.sum" }}
      - attach_workspace:
          at: /tmp/workspace
      - run:
          name: Build artifact
          command: |
            # Setting CIRCLE_TAG because we do not tag the release ourselves.
            source /tmp/workspace/release-version.source
            if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
            if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
            if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
            if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
      - persist_to_workspace:
          root: build
          paths:
            - "*.zip"
            - "tendermint_linux_amd64"

  release_artifacts:
    executor: golang
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - attach_workspace:
          at: /tmp/workspace
      - run:
          name: "Deploy to GitHub"
          command: |
            # Setting CIRCLE_TAG because we do not tag the release ourselves.
            source /tmp/workspace/release-version.source
            echo "---"
            ls -la /tmp/workspace/*.zip
            echo "---"
            python -u scripts/release_management/sha-files.py
            echo "---"
            cat /tmp/workspace/SHA256SUMS
            echo "---"
            export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
            echo "Release ID: ${RELEASE_ID}"
            #Todo: Parallelize uploads
            export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
            export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
            export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
            export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
            python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
            python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"

  release_docker:
    machine:
      image: ubuntu-1604:201903-01
    steps:
      - checkout
      - attach_workspace:
          at: /tmp/workspace
      - run:
          name: "Deploy to Docker Hub"
          command: |
            # Setting CIRCLE_TAG because we do not tag the release ourselves.
            source /tmp/workspace/release-version.source
            cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
            docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
            docker login -u "${DOCKERHUB_USER}" --password-stdin \<<< "${DOCKERHUB_PASS}"
            docker push "tendermint/tendermint"
            docker logout

  reproducible_builds:
    executor: golang
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - run:
          name: Build tendermint
          no_output_timeout: 20m
          command: |
            sudo apt-get install -y ruby
            bash -x ./scripts/gitian-build.sh all
            for os in darwin linux windows; do
              cp gitian-build-${os}/result/tendermint-${os}-res.yml .
              cp gitian-build-${os}/build/out/tendermint-*.tar.gz .
              rm -rf gitian-build-${os}/
            done
      - store_artifacts:
          path: /go/src/github.com/tendermint/tendermint/tendermint-darwin-res.yml
      - store_artifacts:
          path: /go/src/github.com/tendermint/tendermint/tendermint-linux-res.yml
      - store_artifacts:
          path: /go/src/github.com/tendermint/tendermint/tendermint-windows-res.yml
      - store_artifacts:
          path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz

  # Test RPC implementation against the swagger documented specs
  contract_tests:
    working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
    machine:
      image: circleci/classic:latest
    environment:
      GOBIN: /home/circleci/.go_workspace/bin
      GOPATH: /home/circleci/.go_workspace/
      GOOS: linux
      GOARCH: amd64
    parallelism: 1
    steps:
      - checkout
      - run:
          name: Test RPC endpoints against swagger documentation
          command: |
            set -x
            export PATH=~/.local/bin:$PATH

            # install node and dredd
            ./scripts/get_nodejs.sh

            # build the binaries with a proper version of Go
            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks

            # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use
            go get github.com/snikch/goodman/cmd/goodman
            make contract-tests

workflows:
  version: 2
  test-suite:
    jobs:
      - deploy_docs:
          context: tendermint-docs
          filters:
            branches:
              only:
                - master
            tags:
              only:
                - /^v.*/
      - deploy_docs:
          context: tendermint-docs-staging
          filters:
            branches:
              only:
                - docs-theme-latest
      - setup_dependencies
      - test_abci_apps:
          requires:
            - setup_dependencies
      - test_abci_cli:
          requires:
            - setup_dependencies
      - test_apps:
          requires:
            - setup_dependencies
      - test_cover:
          requires:
            - setup_dependencies
      - test_persistence:
          requires:
            - setup_dependencies
      - localnet:
          requires:
            - setup_dependencies
      - test_p2p
      - upload_coverage:
          requires:
            - test_cover
      - reproducible_builds:
          filters:
            branches:
              only:
                - master
                - /v[0-9]+\.[0-9]+/
      - contract_tests:
          requires:
            - setup_dependencies

  release:
    jobs:
      - prepare_build
      - build_artifacts:
          requires:
            - prepare_build
      - release_artifacts:
          requires:
            - prepare_build
            - build_artifacts
          filters:
            branches:
              only:
                - /v[0-9]+\.[0-9]+/
      - release_docker:
          requires:
            - prepare_build
            - build_artifacts
          filters:
            branches:
              only:
                - /v[0-9]+\.[0-9]+/
                - master
```
.github/linter/markdownlint.yml (vendored, new file, 8 lines)

@@ -0,0 +1,8 @@
```yaml
default: true,
MD007: { "indent": 4 }
MD013: false
MD024: { siblings_only: true }
MD025: false
MD033: { no-inline-html: false }
no-hard-tabs: false
whitespace: false
```
.github/workflows/coverage.yml (vendored, new file, 127 lines)

@@ -0,0 +1,127 @@
```yaml
name: Test Coverage
on:
  pull_request:
  push:
    branches:
      - master
      - release/**

jobs:
  split-test-files:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Create a file with all the pkgs
        run: go list ./... > pkgs.txt
      - name: Split pkgs into 4 files
        run: split -d -n l/4 pkgs.txt pkgs.txt.part.
      # cache multiple
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-00"
          path: ./pkgs.txt.part.00
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-01"
          path: ./pkgs.txt.part.01
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-02"
          path: ./pkgs.txt.part.02
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-03"
          path: ./pkgs.txt.part.03

  build-linux:
    name: Build
    runs-on: ubuntu-latest
    strategy:
      fail-fast: false
      matrix:
        goarch: ["arm", "amd64"]
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - name: install
        run: GOOS=linux GOARCH=${{ matrix.goarch }} make build
        if: "env.GIT_DIFF != ''"

  tests:
    runs-on: ubuntu-latest
    needs: split-test-files
    strategy:
      fail-fast: false
      matrix:
        part: ["00", "01", "02", "03"]
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-${{ matrix.part }}"
        if: env.GIT_DIFF
      - name: Set up Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.15
      - name: test & coverage report creation
        run: |
          cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic
        if: env.GIT_DIFF
      - uses: actions/upload-artifact@v2
        with:
          name: "${{ github.sha }}-${{ matrix.part }}-coverage"
          path: ./${{ matrix.part }}profile.out

  upload-coverage-report:
    runs-on: ubuntu-latest
    needs: tests
    steps:
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-00-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-01-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-02-coverage"
        if: env.GIT_DIFF
      - uses: actions/download-artifact@v2
        with:
          name: "${{ github.sha }}-03-coverage"
        if: env.GIT_DIFF
      - run: |
          cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
        if: env.GIT_DIFF
      - uses: codecov/codecov-action@v1.0.15
        with:
          file: ./coverage.txt
        if: env.GIT_DIFF
```
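The upload-coverage-report job above keeps one `mode: atomic` header and strips the duplicates before handing `coverage.txt` to codecov. A minimal sketch of that merge step as a Go program (the profile file names are placeholders mirroring the workflow's artifacts, not part of the diff):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// mergeProfiles emulates the upload step: write a single "mode:" header and
// append every other line from each per-part coverage profile.
func mergeProfiles(out *os.File, parts ...string) error {
	fmt.Fprintln(out, "mode: atomic")
	for _, p := range parts {
		f, err := os.Open(p)
		if err != nil {
			return err
		}
		sc := bufio.NewScanner(f)
		for sc.Scan() {
			if strings.HasPrefix(sc.Text(), "mode:") {
				continue // drop the duplicate header from this part
			}
			fmt.Fprintln(out, sc.Text())
		}
		f.Close()
		if err := sc.Err(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	out, err := os.Create("coverage.txt")
	if err != nil {
		panic(err)
	}
	defer out.Close()
	// Placeholder names in the same shape the workflow produces.
	if err := mergeProfiles(out, "00profile.out", "01profile.out"); err != nil {
		fmt.Fprintln(os.Stderr, "merge failed:", err)
	}
}
```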
.github/workflows/docker.yml (vendored, new file, 59 lines)

@@ -0,0 +1,59 @@
```yaml
name: Build & Push
# Build & Push rebuilds the tendermint docker image on every push to master and creation of tags
# and pushes the image to https://hub.docker.com/r/interchainio/simapp/tags
on:
  pull_request:
  push:
    branches:
      - master
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
      - "v[0-9]+.[0-9]+.[0-9]+-rc*" # Push events to matching v*, i.e. v1.0-rc1, v20.15.10-rc5

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@master
      - name: Prepare
        id: prep
        run: |
          DOCKER_IMAGE=tendermint/tendermint
          VERSION=noop
          if [[ $GITHUB_REF == refs/tags/* ]]; then
            VERSION=${GITHUB_REF#refs/tags/}
          elif [[ $GITHUB_REF == refs/heads/* ]]; then
            VERSION=$(echo ${GITHUB_REF#refs/heads/} | sed -r 's#/+#-#g')
            if [ "${{ github.event.repository.default_branch }}" = "$VERSION" ]; then
              VERSION=latest
            fi
          fi
          TAGS="${DOCKER_IMAGE}:${VERSION}"
          if [[ $VERSION =~ ^v[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
            TAGS="$TAGS,${DOCKER_IMAGE}:${VERSION}"
          fi
          echo ::set-output name=tags::${TAGS}

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Build Tendermint
        run: |
          make build-linux && cp build/tendermint DOCKER/tendermint

      - name: Publish to Docker Hub
        uses: docker/build-push-action@v2
        with:
          context: ./DOCKER
          file: ./DOCKER/Dockerfile
          push: ${{ github.event_name != 'pull_request' }}
          tags: ${{ steps.prep.outputs.tags }}
```
.github/workflows/linkchecker.yml (vendored, new file, 12 lines)

@@ -0,0 +1,12 @@
```yaml
name: Check Markdown links
on:
  schedule:
    - cron: '* */24 * * *'
jobs:
  markdown-link-check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@master
      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.8
        with:
          folder-path: "docs"
```
.github/workflows/lint.yaml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
```yaml
name: Lint
# Lint runs golangci-lint over the entire Tendermint repository
# This workflow is run on every pull request and push to master
# The `golangci` job will pass without running if no *.{go, mod, sum} files have been modified.
on:
  pull_request:
  push:
    branches:
      - master
jobs:
  golangci:
    name: golangci-lint
    runs-on: ubuntu-latest
    timeout-minutes: 4
    steps:
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: golangci/golangci-lint-action@v1
        with:
          # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
          version: v1.26
          args: --timeout 10m
          github-token: ${{ secrets.github_token }}
        if: env.GIT_DIFF
```
.github/workflows/linter.yml (vendored, new file, 31 lines)

@@ -0,0 +1,31 @@
```yaml
name: Lint
on:
  push:
    branches:
      - master
    paths:
      - "**.md"
      - "**.yml"
      - "**.yaml"
  pull_request:
    branches: [master]
    paths:
      - "**.md"

jobs:
  build:
    name: Super linter
    runs-on: ubuntu-latest
    steps:
      - name: Checkout Code
        uses: actions/checkout@v2
      - name: Lint Code Base
        uses: docker://github/super-linter:v3
        env:
          LINTER_RULES_PATH: .
          VALIDATE_ALL_CODEBASE: true
          DEFAULT_BRANCH: master
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          VALIDATE_MD: true
          VALIDATE_OPAENAPI: true
          VALIDATE_YAML: true
```
.github/workflows/release.yml (vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
```yaml
name: "Release"

on:
  push:
    tags:
      - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10

jobs:
  goreleaser:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 0

      - uses: actions/setup-go@v2
        with:
          go-version: '1.15'

      - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2
        with:
          version: latest
          args: release --rm-dist --release-notes=../release_notes.md
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
.github/workflows/stale.yml (vendored, new file, 18 lines)

@@ -0,0 +1,18 @@
```yaml
name: "Close stale pull requests"
on:
  schedule:
    - cron: "0 0 * * *"

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v3
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          stale-pr-message: "This pull request has been automatically marked as stale because it has not had
            recent activity. It will be closed if no further activity occurs. Thank you
            for your contributions."
          days-before-stale: 10
          days-before-close: 4
          exempt-pr-labels: "S:wip"
```
.github/workflows/tests.yml (vendored, new file, 146 lines)

@@ -0,0 +1,146 @@
```yaml
name: Tests
# Tests runs different tests (test_abci_apps, test_abci_cli, test_apps)
# This workflow runs on every push to master or release branch and every pull requests
# All jobs will pass without running if no *{.go, .mod, .sum} files have been modified
on:
  pull_request:
  push:
    branches:
      - master
      - release/**

jobs:
  cleanup-runs:
    runs-on: ubuntu-latest
    steps:
      - uses: rokroskar/workflow-run-cleanup-action@master
        env:
          GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
    if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'"

  build:
    name: Build
    runs-on: ubuntu-latest
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - name: install
        run: make install install_abci
        if: "env.GIT_DIFF != ''"
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
        if: env.GIT_DIFF
      # Cache binaries for use by other jobs
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
        if: env.GIT_DIFF

  test_abci_apps:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
        if: env.GIT_DIFF
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
        if: env.GIT_DIFF
      - name: test_abci_apps
        run: abci/tests/test_app/test.sh
        shell: bash
        if: env.GIT_DIFF

  test_abci_cli:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
        if: env.GIT_DIFF
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
        if: env.GIT_DIFF
      - run: abci/tests/test_cli/test.sh
        shell: bash
        if: env.GIT_DIFF

  test_apps:
    runs-on: ubuntu-latest
    needs: build
    timeout-minutes: 5
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.15"
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
        if: env.GIT_DIFF
      - uses: actions/cache@v2.1.3
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary
        if: env.GIT_DIFF
      - name: test_apps
        run: test/app/test.sh
        shell: bash
        if: env.GIT_DIFF
```
```diff
@@ -16,14 +16,14 @@ linters:
    - gofmt
    - goimports
    # - golint
-    - gosec
+    # - gosec
    - gosimple
    - govet
    - ineffassign
    - interfacer
    - lll
    - misspell
-    - maligned
+    # - maligned
    - nakedret
    - prealloc
    - scopelint
@@ -83,3 +83,5 @@ linters-settings:
  # disabled-checks:
  #   - wrapperFunc
  #   - commentFormatting # https://github.com/go-critic/go-critic/issues/755
+service:
+  golangci-lint-version: 1.26.x
```
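The hunks above disable `gosec` and `maligned` for the whole repository; the per-line `// nolint: maligned` comment removed in the consensus test hunk further down becomes unnecessary once the linter is off globally. A small, purely illustrative sketch (hypothetical struct names) of what `maligned` flags, i.e. field ordering that wastes padding, together with the per-line suppression style:

```go
package main

import (
	"fmt"
	"unsafe"
)

// padded keeps a readable field order but wastes padding around the int64;
// the //nolint comment is the per-line suppression style seen in the diff.
type padded struct { //nolint:maligned // keep logical field order
	a bool
	b int64
	c bool
}

// packed groups fields by size, which is the reordering maligned suggests.
type packed struct {
	b int64
	a bool
	c bool
}

func main() {
	fmt.Println(unsafe.Sizeof(padded{}), unsafe.Sizeof(packed{})) // 24 16
}
```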
.goreleaser.yml (new file, 28 lines)

@@ -0,0 +1,28 @@
```yaml
project_name: Tendermint

env:
  # Require use of Go modules.
  - GO111MODULE=on

builds:
  - id: "Tendermint"
    main: ./cmd/tendermint/main.go
    ldflags:
      - -s -w -X github.com/tendermint/tendermint/version.TMCoreSemVer={{ .Version }}
    env:
      - CGO_ENABLED=0
    goos:
      - darwin
      - linux
      - windows
    goarch:
      - amd64
      - arm
      - arm64

checksum:
  name_template: SHA256SUMS-{{.Version}}.txt
  algorithm: sha256

release:
  name_template: "{{.Version}} (WARNING: BETA SOFTWARE)"
```
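The `ldflags` entry stamps the release version into the binary at link time by overwriting `github.com/tendermint/tendermint/version.TMCoreSemVer`. A minimal sketch of the same pattern in a single-file program (the variable here lives in `main` only to keep the example self-contained):

```go
package main

import "fmt"

// TMCoreSemVer mirrors the version variable targeted by the ldflags above:
// a plain string that the linker overwrites at build time.
var TMCoreSemVer = "dev"

func main() {
	// Built normally this prints "dev"; built the way .goreleaser.yml does,
	//   go build -ldflags "-s -w -X main.TMCoreSemVer=0.32.15"
	// it prints the injected version instead.
	fmt.Println("tendermint version:", TMCoreSemVer)
}
```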
CHANGELOG.md (964 lines changed; full diff suppressed because it is too large). Loaded hunks:

```
@@ -1,9 +1,8 @@
## v0.32.8
## v0.32.15

\*\*

Special thanks to external contributors on this release:
@erikgrinaker

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
@@ -15,24 +14,12 @@ program](https://hackerone.com/tendermint).
- Apps

- Go API
  - [libs/pubsub] [\#4070](https://github.com/tendermint/tendermint/pull/4070) `Query#(Matches|Conditions)` returns an error.

### FEATURES:

### IMPROVEMENTS:

- [mempool] [\#4083](https://github.com/tendermint/tendermint/pull/4083) Added TxInfo parameter to CheckTx(), and removed CheckTxWithInfo() (@erikgrinaker)
- [mempool] [\#4057](https://github.com/tendermint/tendermint/issues/4057) Include peer ID when logging rejected txns (@erikgrinaker)
- [tools] [\#4023](https://github.com/tendermint/tendermint/issues/4023) Improved `tm-monitor` formatting of start time and avg tx throughput (@erikgrinaker)
- [libs/pubsub] [\#4070](https://github.com/tendermint/tendermint/pull/4070) No longer panic in `Query#(Matches|Conditions)` preferring to return an error instead.
- [libs/pubsub] [\#4070](https://github.com/tendermint/tendermint/pull/4070) Strip out non-numeric characters when attempting to match numeric values.
- [p2p] [\#3991](https://github.com/tendermint/tendermint/issues/3991) Log "has been established or dialed" as debug log instead of Error for connected peers (@whunmr)
- [rpc] [\#4077](https://github.com/tendermint/tendermint/pull/4077) Added support for `EXISTS` clause to the Websocket query interface.
- [privval] Add `SignerDialerEndpointRetryWaitInterval` option (@cosmostuba)
- [crypto] Add `RegisterKeyType` to amino to allow external key types registration (@austinabell)

### BUG FIXES:

- [tools] [\#4023](https://github.com/tendermint/tendermint/issues/4023) Refresh `tm-monitor` health when validator count is updated (@erikgrinaker)
- [state] [\#4104](https://github.com/tendermint/tendermint/pull/4104) txindex/kv: Fsync data to disk immediately after receiving it (@guagualvcha)
- [state] [\#4095](https://github.com/tendermint/tendermint/pull/4095) txindex/kv: Return an error if there's one when the user searches for a tx (hash=X) (@hsyis)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
```
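The libs/pubsub entry above is a breaking Go API change: `Query#(Matches|Conditions)` now reports an error instead of panicking on bad input. The toy type below only illustrates the shape of that change; it is not the real `libs/pubsub/query` API, and its `Matches` signature is an assumption for the sake of a self-contained example:

```go
package main

import (
	"errors"
	"fmt"
	"strconv"
)

// query is a stand-in for a pubsub query like "tx.height >= 5".
type query struct {
	key, op, value string
}

// Matches returns an error for non-numeric values instead of panicking,
// mirroring the post-#4070 behaviour described in the changelog.
func (q query) Matches(events map[string]string) (bool, error) {
	raw, ok := events[q.key]
	if !ok {
		return false, nil
	}
	want, err := strconv.ParseFloat(q.value, 64)
	if err != nil {
		return false, fmt.Errorf("query value %q is not numeric: %w", q.value, err)
	}
	got, err := strconv.ParseFloat(raw, 64)
	if err != nil {
		// The pre-#4070 behaviour would have been a panic here.
		return false, errors.New("event value is not numeric")
	}
	return q.op == ">=" && got >= want, nil
}

func main() {
	q := query{key: "tx.height", op: ">=", value: "5"}
	ok, err := q.Matches(map[string]string{"tx.height": "7"})
	fmt.Println(ok, err) // true <nil>
}
```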
```diff
@@ -53,7 +53,7 @@ func testCounter() {
	}

	fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
-	cmd := exec.Command("bash", "-c", fmt.Sprintf("abci-cli %s", abciApp))
+	cmd := exec.Command("bash", "-c", fmt.Sprintf("abci-cli %s", abciApp)) //nolint:gosec
	cmd.Stdout = os.Stdout
	if err := cmd.Start(); err != nil {
		log.Fatalf("starting %q err: %v", abciApp, err)
```
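The `//nolint:gosec` annotation silences gosec's subprocess audit for this line rather than restructuring the call. A small sketch of both variants, with an illustrative value for `abciApp` (the real test derives it from its parameters); whether gosec still audits the shell-free form depends on its configuration:

```go
package main

import (
	"log"
	"os"
	"os/exec"
)

func main() {
	abciApp := "counter" // illustrative value, not taken from the diff

	// Variant used in the diff: run through a shell and suppress the warning.
	shellCmd := exec.Command("bash", "-c", "abci-cli "+abciApp) //nolint:gosec

	// Shell-free variant: arguments are passed directly to the binary, so
	// nothing in abciApp is ever interpreted by a shell.
	directCmd := exec.Command("abci-cli", abciApp)

	for _, cmd := range []*exec.Cmd{shellCmd, directCmd} {
		cmd.Stdout = os.Stdout
		if err := cmd.Start(); err != nil {
			log.Printf("starting %q: %v", abciApp, err)
		}
	}
}
```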
```diff
@@ -176,7 +176,7 @@ func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage,
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := decodeMsg(msgBytes)
	if err != nil {
-		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		bcR.Switch.StopPeerForError(src, err)
		return
	}
```
```diff
@@ -30,6 +30,7 @@ type BpPeer struct {
	Height                  int64                  // the peer reported height
	NumPendingBlockRequests int                    // number of requests still waiting for block responses
	blocks                  map[int64]*types.Block // blocks received or expected to be received from this peer
+	noBlocks                map[int64]struct{}     // heights for which the peer does not have blocks
	blockResponseTimer      *time.Timer
	recvMonitor             *flow.Monitor
	params                  *BpPeerParams // parameters for timer and monitor
@@ -45,12 +46,13 @@ func NewBpPeer(
		params = BpPeerDefaultParams()
	}
	return &BpPeer{
-		ID:      peerID,
-		Height:  height,
-		blocks:  make(map[int64]*types.Block, maxRequestsPerPeer),
-		logger:  log.NewNopLogger(),
-		onErr:   onErr,
-		params:  params,
+		ID:       peerID,
+		Height:   height,
+		blocks:   make(map[int64]*types.Block, maxRequestsPerPeer),
+		noBlocks: make(map[int64]struct{}),
+		logger:   log.NewNopLogger(),
+		onErr:    onErr,
+		params:   params,
	}
}
@@ -129,6 +131,19 @@ func (peer *BpPeer) RemoveBlock(height int64) {
	delete(peer.blocks, height)
}

+// SetNoBlock records that the peer does not have a block for height.
+func (peer *BpPeer) SetNoBlock(height int64) {
+	peer.noBlocks[height] = struct{}{}
+}
+
+// NoBlock returns true if the peer does not have a block for height.
+func (peer *BpPeer) NoBlock(height int64) bool {
+	if _, ok := peer.noBlocks[height]; ok {
+		return true
+	}
+	return false
+}
+
// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
func (peer *BpPeer) RequestSent(height int64) {
	peer.blocks[height] = nil
```
```diff
@@ -99,6 +99,18 @@ func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error {
	return nil
}

+// SetNoBlock records that the peer does not have a block for height and
+// schedules a new request for that height from another peer.
+func (pool *BlockPool) SetNoBlock(peerID p2p.ID, height int64) {
+	peer := pool.peers[peerID]
+	if peer == nil {
+		return
+	}
+	peer.SetNoBlock(height)
+
+	pool.rescheduleRequest(peerID, height)
+}
+
// Cleans and deletes the peer. Recomputes the max peer height.
func (pool *BlockPool) deletePeer(peer *BpPeer) {
	if peer == nil {
@@ -213,7 +225,7 @@ func (pool *BlockPool) sendRequest(height int64) bool {
		if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
			continue
		}
-		if peer.Height < height {
+		if peer.Height < height || peer.NoBlock(height) {
			continue
		}
```
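Taken together, these hunks record "no block" answers per peer and make the request scheduler skip such peers for that height. A minimal, self-contained sketch of the same idea with simplified types (not the real `BpPeer`/`BlockPool` API):

```go
package main

import "fmt"

// peer is a simplified stand-in for BpPeer: it tracks only the reported
// height and the heights it has said it does not have.
type peer struct {
	id       string
	height   int64
	noBlocks map[int64]struct{}
}

func (p *peer) setNoBlock(h int64)   { p.noBlocks[h] = struct{}{} }
func (p *peer) noBlock(h int64) bool { _, ok := p.noBlocks[h]; return ok }

// pickPeer mirrors the sendRequest loop above: skip peers that are too short
// or that already reported "no block" for this height.
func pickPeer(peers []*peer, height int64) *peer {
	for _, p := range peers {
		if p.height < height || p.noBlock(height) {
			continue
		}
		return p
	}
	return nil
}

func main() {
	p1 := &peer{id: "P1", height: 10, noBlocks: map[int64]struct{}{}}
	p2 := &peer{id: "P2", height: 10, noBlocks: map[int64]struct{}{}}

	// P1 answers a request for height 7 with a "no block" response:
	// record it, then reschedule, which here just means picking again.
	p1.setNoBlock(7)
	if p := pickPeer([]*peer{p1, p2}, 7); p != nil {
		fmt.Println("retrying height 7 from", p.id) // P2
	}
}
```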
```diff
@@ -4,6 +4,7 @@ import (
	"errors"
	"fmt"
	"reflect"
	"sync/atomic"
	"time"

	amino "github.com/tendermint/go-amino"
@@ -73,6 +74,9 @@ type BlockchainReactor struct {
	eventsFromFSMCh chan bcFsmMessage

	swReporter *behaviour.SwitchReporter

	// Atomic integer (0 - sync in progress, 1 - finished syncing)
	syncEnded int32
}

// NewBlockchainReactor returns new reactor instance.
@@ -144,13 +148,22 @@ func (bcR *BlockchainReactor) OnStart() error {
	bcR.swReporter = behaviour.NewSwitcReporter(bcR.BaseReactor.Switch)
	if bcR.fastSync {
		go bcR.poolRoutine()
	} else { // if we're not fast syncing, mark it as finished
		bcR.setSyncEnded()
	}
	return nil
}

// OnStop implements cmn.Service.
func (bcR *BlockchainReactor) OnStop() {
	_ = bcR.Stop()
}

func (bcR *BlockchainReactor) isSyncEnded() bool {
	return atomic.LoadInt32(&(bcR.syncEnded)) != 0
}

func (bcR *BlockchainReactor) setSyncEnded() {
	atomic.StoreInt32(&(bcR.syncEnded), 1)
}

// GetChannels implements Reactor
@@ -201,6 +214,10 @@ func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcStatusRequestMessa

// RemovePeer implements Reactor by removing peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	if bcR.isSyncEnded() {
		return
	}

	msgData := bcReactorMessage{
		event: peerRemoveEv,
		data: bReactorEventData{
@@ -244,6 +261,10 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
	}

	case *bcBlockResponseMessage:
		if bcR.isSyncEnded() {
			return
		}

		msgForFSM := bcReactorMessage{
			event: blockResponseEv,
			data: bReactorEventData{
@@ -256,7 +277,26 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
		bcR.Logger.Info("Received", "src", src, "height", msg.Block.Height)
		bcR.messagesForFSMCh <- msgForFSM

	case *bcNoBlockResponseMessage:
		if bcR.isSyncEnded() {
			return
		}

		msgForFSM := bcReactorMessage{
			event: noBlockResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Height,
			},
		}
		bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height)
		bcR.messagesForFSMCh <- msgForFSM

	case *bcStatusResponseMessage:
		if bcR.isSyncEnded() {
			return
		}

		// Got a peer status. Unverified.
		msgForFSM := bcReactorMessage{
			event: statusResponseEv,
@@ -267,7 +307,6 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
			},
		}
		bcR.messagesForFSMCh <- msgForFSM

	default:
		bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
	}
@@ -275,16 +314,20 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)

// processBlocksRoutine processes blocks until signlaed to stop over the stopProcessing channel
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {

	processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
	doProcessBlockCh := make(chan struct{}, 1)
	defer processReceivedBlockTicker.Stop()

	lastHundred := time.Now()
	lastRate := 0.0
	var (
		doProcessBlockCh = make(chan struct{}, 1)
		lastHundred      = time.Now()
		lastRate         = 0.0
	)

ForLoop:
	for {
		select {
		case <-bcR.Quit():
			break ForLoop
		case <-stopProcessing:
			bcR.Logger.Info("finishing block execution")
			break ForLoop
@@ -327,12 +370,14 @@ ForLoop:

// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {

	bcR.fsm.Start()

	sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)

	defer sendBlockRequestTicker.Stop()
	// NOTE: statusUpdateTicker can continue to run

	stopProcessing := make(chan struct{}, 1)
	go bcR.processBlocksRoutine(stopProcessing)

@@ -365,12 +410,10 @@ ForLoop:

		case msg := <-bcR.eventsFromFSMCh:
			switch msg.event {
			case syncFinishedEv:
			case syncFinishedEv: // Sent from the FSM when it enters finished state.
				stopProcessing <- struct{}{}
				// Sent from the FSM when it enters finished state.
				break ForLoop
			case peerErrorEv:
				// Sent from the FSM when it detects peer error
				bcR.setSyncEnded()
			case peerErrorEv: // Sent from the FSM when it detects peer error
				bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
				if msg.data.err == errNoPeerResponse {
					// Sent from the peer timeout handler routine
@@ -437,14 +480,12 @@ func (bcR *BlockchainReactor) processBlock() error {
	return nil
}

// Implements bcRNotifier
// sendStatusRequest broadcasts `BlockStore` height.
func (bcR *BlockchainReactor) sendStatusRequest() {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
	bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
}

// Implements bcRNotifier
// BlockRequest sends `BlockRequest` height.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	peer := bcR.Switch.Peers().Get(peerID)
@@ -460,19 +501,14 @@ func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) erro
	return nil
}

// Implements bcRNotifier
func (bcR *BlockchainReactor) switchToConsensus() {
	conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
	if ok {
		conR.SwitchToConsensus(bcR.state, bcR.blocksSynced)
		bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
	}
	// else {
	// Should only happen during testing.
	// }
}

// Implements bcRNotifier
// Called by FSM and pool:
// - pool calls when it detects slow peer or when peer times out
// - FSM calls when:
@@ -490,7 +526,6 @@ func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
	bcR.eventsFromFSMCh <- msgData
}

// Implements bcRNotifier
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	if timer == nil {
		panic("nil timer pointer parameter")
```
```diff
@@ -73,6 +73,7 @@ const (
	startFSMEv = iota + 1
	statusResponseEv
	blockResponseEv
	noBlockResponseEv
	processedBlockEv
	makeRequestsEv
	stopFSMEv
@@ -93,6 +94,9 @@ func (msg *bcReactorMessage) String() string {
	case blockResponseEv:
		dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v",
			msg.data.peerID, msg.data.block.Height, msg.data.length)
	case noBlockResponseEv:
		dataStr = fmt.Sprintf("peer=%v requested height=%v",
			msg.data.peerID, msg.data.height)
	case processedBlockEv:
		dataStr = fmt.Sprintf("error=%v", msg.data.err)
	case makeRequestsEv:
@@ -118,6 +122,8 @@ func (ev bReactorEvent) String() string {
		return "statusResponseEv"
	case blockResponseEv:
		return "blockResponseEv"
	case noBlockResponseEv:
		return "noBlockResponseEv"
	case processedBlockEv:
		return "processedBlockEv"
	case makeRequestsEv:
@@ -268,7 +274,11 @@ func init() {
				return waitForPeer, err
			}
			return waitForBlock, err
		case noBlockResponseEv:
			fsm.logger.Error("peer does not have requested block", "peer", data.peerID)
			fsm.pool.SetNoBlock(data.peerID, data.height)

			return waitForBlock, nil
		case processedBlockEv:
			if data.err != nil {
				first, second, _ := fsm.pool.FirstTwoBlocksAndPeers()
```
```diff
@@ -100,6 +100,19 @@ func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTest
	}
}

func sNoBlockResponseEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        noBlockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
		},
		wantState: expected,
		wantErr:   err,
	}
}

func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
@@ -352,6 +365,46 @@ func TestFSMBlockVerificationFailure(t *testing.T) {
	executeFSMTests(t, tests, false)
}

func TestFSMNoBlockResponse(t *testing.T) {
	tests := []testFields{
		{
			name:               "no block response",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get blocks 1-3 from it
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// process block failure, should remove P1 and all blocks
				sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 1, nil),
				sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 2, nil),
				sNoBlockResponseEv("waitForBlock", "waitForBlock", "P1", 3, nil),

				// get blocks 1-3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),

				// finish after processing blocks 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBadBlockFromPeer(t *testing.T) {
	tests := []testFields{
		{
@@ -820,7 +873,7 @@ const (
	maxRequestsPerPeerTest      = 20
	maxTotalPendingRequestsTest = 600
	maxNumPeersTest             = 1000
-	maxNumBlocksInChainTest     = 10000 //should be smaller than 9999999
+	maxNumBlocksInChainTest     = 10000 // should be smaller than 9999999
)

func makeCorrectTransitionSequenceWithRandomParameters() testFields {
```
```diff
@@ -226,11 +226,9 @@ func TestFastSyncNoBlockResponse(t *testing.T) {
	}
}

-// NOTE: This is too hard to test without
-// an easy way to add test peer to switch
-// or without significant refactoring of the module.
-// Alternatively we could actually dial a TCP conn but
-// that seems extreme.
+// NOTE: This is too hard to test without an easy way to add test peer to
+// switch or without significant refactoring of the module. Alternatively we
+// could actually dial a TCP conn but that seems extreme.
func TestFastSyncBadBlockStopsPeer(t *testing.T) {
	numNodes := 4
	maxBlockHeight := int64(148)
```
```diff
@@ -97,7 +97,6 @@ func (r *Reactor) demux() {

func (r *Reactor) Stop() {
	r.logger.Info("reactor stopping")

	r.ticker.Stop()
	r.scheduler.stop()
	r.processor.stop()
```
```diff
@@ -19,6 +19,9 @@ type Metrics struct {
	// Height of the chain.
	Height metrics.Gauge

	// ValidatorLastSignedHeight of a validator.
	ValidatorLastSignedHeight metrics.Gauge

	// Number of rounds.
	Rounds metrics.Gauge

@@ -26,6 +29,10 @@ type Metrics struct {
	Validators metrics.Gauge
	// Total power of all validators.
	ValidatorsPower metrics.Gauge
	// Power of a validator.
	ValidatorPower metrics.Gauge
	// Amount of blocks missed by a validator.
	ValidatorMissedBlocks metrics.Gauge
	// Number of validators who did not sign.
	MissingValidators metrics.Gauge
	// Total power of the missing validators.
@@ -81,12 +88,30 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
			Name:      "validators",
			Help:      "Number of validators.",
		}, labels).With(labelsAndValues...),
		ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "validator_last_signed_height",
			Help:      "Last signed height for a validator",
		}, append(labels, "validator_address")).With(labelsAndValues...),
		ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "validator_missed_blocks",
			Help:      "Total missed blocks for a validator",
		}, append(labels, "validator_address")).With(labelsAndValues...),
		ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "validators_power",
			Help:      "Total power of all validators.",
		}, labels).With(labelsAndValues...),
		ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "validator_power",
			Help:      "Power of a validator",
		}, append(labels, "validator_address")).With(labelsAndValues...),
		MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
@@ -163,10 +188,14 @@ func NopMetrics() *Metrics {
	return &Metrics{
		Height: discard.NewGauge(),

		ValidatorLastSignedHeight: discard.NewGauge(),

		Rounds: discard.NewGauge(),

		Validators:             discard.NewGauge(),
		ValidatorsPower:        discard.NewGauge(),
		ValidatorPower:         discard.NewGauge(),
		ValidatorMissedBlocks:  discard.NewGauge(),
		MissingValidators:      discard.NewGauge(),
		MissingValidatorsPower: discard.NewGauge(),
		ByzantineValidators:    discard.NewGauge(),
```
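The new per-validator gauges are registered with a `validator_address` label, so a single metric fans out into one series per validator. A minimal usage sketch with go-kit gauges; only the metric names and label come from the diff, while the import path, field names, and the example address are assumptions to keep this runnable without a Prometheus registry:

```go
package main

import (
	"fmt"

	"github.com/go-kit/kit/metrics/discard"
)

func main() {
	// Stand-ins for Metrics.ValidatorPower and Metrics.ValidatorMissedBlocks;
	// discard gauges accept the same calls as the Prometheus-backed ones.
	validatorPower := discard.NewGauge()
	validatorMissedBlocks := discard.NewGauge()

	// With PrometheusMetrics these gauges carry the "validator_address" label,
	// so each call below targets the series for one validator.
	addr := "B00A6323737F321EB0B8D59C6FD497A14B60938A" // example address only
	validatorPower.With("validator_address", addr).Set(10)
	validatorMissedBlocks.With("validator_address", addr).Add(1)

	fmt.Println("recorded power and missed blocks for", addr)
}
```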
```diff
@@ -57,7 +57,7 @@ func NewConsensusReactor(consensusState *ConsensusState, fastSync bool, options
		metrics:  NopMetrics(),
	}
	conR.updateFastSyncingMetric()
-	conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR)
+	conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR)

	for _, option := range options {
		option(conR)
@@ -218,7 +218,7 @@ func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)

	msg, err := decodeMsg(msgBytes)
	if err != nil {
-		conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
		conR.Switch.StopPeerForError(src, err)
		return
	}
@@ -501,10 +501,12 @@ OUTER_LOOP:
			if prs.ProposalBlockParts == nil {
				blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height)
				if blockMeta == nil {
					panic(fmt.Sprintf("Failed to load block %d when blockStore is at %d",
						prs.Height, conR.conS.blockStore.Height()))
					heightLogger.Error("Failed to load block meta",
						"blockstoreHeight", conR.conS.blockStore.Height())
					time.Sleep(conR.conS.config.PeerGossipSleepDuration)
				} else {
					ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
				}
				ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader)
				// continue the loop since prs is a copy and not effected by this initialization
				continue OUTER_LOOP
			}
```
```diff
@@ -888,7 +888,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
}

func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
-	testCases := []struct { // nolint: maligned
+	testCases := []struct {
		malleateFn func(*VoteSetBitsMessage)
		expErr     string
	}{
```
```diff
@@ -17,6 +17,7 @@ import (

	cfg "github.com/tendermint/tendermint/config"
	cstypes "github.com/tendermint/tendermint/consensus/types"
	"github.com/tendermint/tendermint/crypto"
	tmevents "github.com/tendermint/tendermint/libs/events"
	"github.com/tendermint/tendermint/p2p"
	sm "github.com/tendermint/tendermint/state"
@@ -31,6 +32,8 @@ var (
	ErrInvalidProposalPOLRound = errors.New("Error invalid proposal POL round")
	ErrAddingVote              = errors.New("Error adding vote")
	ErrVoteHeightMismatch      = errors.New("Error vote height mismatch")

	errPubKeyIsNotSet = errors.New("pubkey is not set. Look for \"Can't get private validator pubkey\" errors")
)

//-----------------------------------------------------------------------------
@@ -95,6 +98,9 @@ type ConsensusState struct {
	mtx sync.RWMutex
	cstypes.RoundState
	state sm.State // State until height-1.
	// privValidator pubkey, memoized for the duration of one block
	// to avoid extra requests to HSM
	privValidatorPubKey crypto.PubKey

	// state changes may be triggered by: msgs from peers,
	// msgs from ourself, or by timeouts
@@ -251,11 +257,17 @@ func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
	return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
}

-// SetPrivValidator sets the private validator account for signing votes.
+// SetPrivValidator sets the private validator account for signing votes. It
+// immediately requests pubkey and caches it.
func (cs *ConsensusState) SetPrivValidator(priv types.PrivValidator) {
	cs.mtx.Lock()
	defer cs.mtx.Unlock()

	cs.privValidator = priv
	cs.mtx.Unlock()

	if err := cs.updatePrivValidatorPubKey(); err != nil {
		cs.Logger.Error("Can't get private validator pubkey", "err", err)
	}
}

// SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing.
```
@@ -920,8 +932,17 @@ func (cs *ConsensusState) enterPropose(height int64, round int) {
|
||||
return
|
||||
}
|
||||
|
||||
logger.Debug("This node is a validator")
|
||||
|
||||
if cs.privValidatorPubKey == nil {
|
||||
// If this node is a validator & proposer in the current round, it will
|
||||
// miss the opportunity to create a block.
|
||||
logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet))
|
||||
return
|
||||
}
|
||||
address := cs.privValidatorPubKey.Address()
|
||||
|
||||
// if not a validator, we're done
|
||||
address := cs.privValidator.GetPubKey().Address()
|
||||
if !cs.Validators.HasAddress(address) {
|
||||
logger.Debug("This node is not a validator", "addr", address, "vals", cs.Validators)
|
||||
return
|
||||
@@ -1007,7 +1028,12 @@ func (cs *ConsensusState) isProposalComplete() bool {
|
||||
// is returned for convenience so we can log the proposal block.
|
||||
// Returns nil block upon error.
|
||||
// NOTE: keep it side-effect free for clarity.
|
||||
// CONTRACT: cs.privValidator is not nil.
|
||||
func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
|
||||
if cs.privValidator == nil {
|
||||
panic("entered createProposalBlock with privValidator being nil")
|
||||
}
|
||||
|
||||
var commit *types.Commit
|
||||
switch {
|
||||
case cs.Height == 1:
|
||||
@@ -1017,13 +1043,19 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts
|
||||
case cs.LastCommit.HasTwoThirdsMajority():
|
||||
// Make the commit from LastCommit
|
||||
commit = cs.LastCommit.MakeCommit()
|
||||
default:
|
||||
// This shouldn't happen.
|
||||
cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block.")
|
||||
default: // This shouldn't happen.
|
||||
cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block")
|
||||
return
|
||||
}
|
||||
|
||||
proposerAddr := cs.privValidator.GetPubKey().Address()
|
||||
if cs.privValidatorPubKey == nil {
|
||||
// If this node is a validator & proposer in the current round, it will
|
||||
// miss the opportunity to create a block.
|
||||
cs.Logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet))
|
||||
return
|
||||
}
|
||||
proposerAddr := cs.privValidatorPubKey.Address()
|
||||
|
||||
return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr)
|
||||
}
|
||||
|
||||
@@ -1411,7 +1443,8 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
// restart).
|
||||
endMsg := EndHeightMessage{height}
|
||||
if err := cs.wal.WriteSync(endMsg); err != nil { // NOTE: fsync
|
||||
panic(fmt.Sprintf("Failed to write %v msg to consensus wal due to %v. Check your FS and restart the node", endMsg, err))
|
||||
panic(fmt.Sprintf("Failed to write %v msg to consensus wal due to %v. Check your FS and restart the node",
|
||||
endMsg, err))
|
||||
}
|
||||
|
||||
fail.Fail() // XXX
|
||||
@@ -1445,6 +1478,11 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
|
||||
fail.Fail() // XXX
|
||||
|
||||
// Private validator might have changed its key pair => refetch pubkey.
|
||||
if err := cs.updatePrivValidatorPubKey(); err != nil {
|
||||
cs.Logger.Error("Can't get private validator pubkey", "err", err)
|
||||
}
|
||||
|
||||
// cs.StartTime is already set.
|
||||
// Schedule Round0 to start soon.
|
||||
cs.scheduleRound0(&cs.RoundState)
|
||||
@@ -1458,8 +1496,22 @@ func (cs *ConsensusState) finalizeCommit(height int64) {
|
||||
func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
|
||||
cs.metrics.Validators.Set(float64(cs.Validators.Size()))
|
||||
cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))
|
||||
missingValidators := 0
|
||||
missingValidatorsPower := int64(0)
|
||||
|
||||
var (
|
||||
missingValidators = 0
|
||||
missingValidatorsPower int64
|
||||
address types.Address
|
||||
)
|
||||
|
||||
if cs.privValidator != nil {
|
||||
if cs.privValidatorPubKey == nil {
|
||||
// Metrics won't be updated, but it's not critical.
|
||||
cs.Logger.Error(fmt.Sprintf("recordMetrics: %v", errPubKeyIsNotSet))
|
||||
} else {
|
||||
address = cs.privValidatorPubKey.Address()
|
||||
}
|
||||
}
|
||||
|
||||
for i, val := range cs.Validators.Validators {
|
||||
var vote *types.CommitSig
|
||||
if i < len(block.LastCommit.Precommits) {
|
||||
@@ -1469,9 +1521,22 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
|
||||
missingValidators++
|
||||
missingValidatorsPower += val.VotingPower
|
||||
}
|
||||
|
||||
if bytes.Equal(val.Address, address) {
|
||||
label := []string{
|
||||
"validator_address", val.Address.String(),
|
||||
}
|
||||
cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower))
|
||||
if vote != nil {
|
||||
cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height))
|
||||
} else {
|
||||
cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1))
|
||||
}
|
||||
}
|
||||
}
|
||||
cs.metrics.MissingValidators.Set(float64(missingValidators))
|
||||
cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower))
|
||||
|
||||
cs.metrics.ByzantineValidators.Set(float64(len(block.Evidence.Evidence)))
|
||||
byzantineValidatorsPower := int64(0)
|
||||
for _, ev := range block.Evidence.Evidence {
|
||||
@@ -1492,7 +1557,6 @@ func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) {
|
||||
cs.metrics.BlockSizeBytes.Set(float64(block.Size()))
|
||||
cs.metrics.TotalTxs.Set(float64(block.TotalTxs))
|
||||
cs.metrics.CommittedHeight.Set(float64(block.Height))
|
||||
|
||||
}
|
||||
|
||||
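The recordMetrics change above starts exporting per-validator gauges (validator power, last signed height, missed blocks) keyed by a "validator_address" label, using the memoized pubkey to find this node's own entry. Tendermint wires these through its go-kit metrics wrappers; the following is only a standalone sketch of the same labeling pattern using prometheus/client_golang directly, with made-up metric names and placeholder values.

```go
package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// validatorPower mirrors the idea of consensus_validator_power: one gauge
// per validator_address label value.
var validatorPower = prometheus.NewGaugeVec(
	prometheus.GaugeOpts{
		Namespace: "consensus",
		Name:      "validator_power",
		Help:      "Voting power of this node, if it is in the validator set.",
	},
	[]string{"validator_address"},
)

func main() {
	prometheus.MustRegister(validatorPower)

	// In recordMetrics the address comes from the memoized privValidatorPubKey;
	// here it is just a hypothetical placeholder.
	const addr = "1234ABCD"
	validatorPower.WithLabelValues(addr).Set(10)

	http.Handle("/metrics", promhttp.Handler())
	_ = http.ListenAndServe(":26660", nil)
}
```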
//-----------------------------------------------------------------------------
|
||||
@@ -1613,16 +1677,15 @@ func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, err
|
||||
if err == ErrVoteHeightMismatch {
|
||||
return added, err
|
||||
} else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok {
|
||||
addr := cs.privValidator.GetPubKey().Address()
|
||||
if bytes.Equal(vote.ValidatorAddress, addr) {
|
||||
if cs.privValidatorPubKey == nil {
|
||||
return false, errPubKeyIsNotSet
|
||||
}
|
||||
if bytes.Equal(vote.ValidatorAddress, cs.privValidatorPubKey.Address()) {
|
||||
cs.Logger.Error(
|
||||
"Found conflicting vote from ourselves. Did you unsafe_reset a validator?",
|
||||
"height",
|
||||
vote.Height,
|
||||
"round",
|
||||
vote.Round,
|
||||
"type",
|
||||
vote.Type)
|
||||
"height", vote.Height,
|
||||
"round", vote.Round,
|
||||
"type", vote.Type)
|
||||
return added, err
|
||||
}
|
||||
cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence)
|
||||
@@ -1647,14 +1710,10 @@ func (cs *ConsensusState) addVote(
|
||||
peerID p2p.ID) (added bool, err error) {
|
||||
cs.Logger.Debug(
|
||||
"addVote",
|
||||
"voteHeight",
|
||||
vote.Height,
|
||||
"voteType",
|
||||
vote.Type,
|
||||
"valIndex",
|
||||
vote.ValidatorIndex,
|
||||
"csHeight",
|
||||
cs.Height)
|
||||
"voteHeight", vote.Height,
|
||||
"voteType", vote.Type,
|
||||
"valIndex", vote.ValidatorIndex,
|
||||
"csHeight", cs.Height)
|
||||
|
||||
// A precommit for the previous height?
|
||||
// These come in while we wait timeoutCommit
|
||||
@@ -1808,7 +1867,10 @@ func (cs *ConsensusState) signVote(
|
||||
// and the privValidator will refuse to sign anything.
|
||||
cs.wal.FlushAndSync()
|
||||
|
||||
addr := cs.privValidator.GetPubKey().Address()
|
||||
if cs.privValidatorPubKey == nil {
|
||||
return nil, errPubKeyIsNotSet
|
||||
}
|
||||
addr := cs.privValidatorPubKey.Address()
|
||||
valIndex, _ := cs.Validators.GetByAddress(addr)
|
||||
|
||||
vote := &types.Vote{
|
||||
@@ -1845,8 +1907,18 @@ func (cs *ConsensusState) voteTime() time.Time {
|
||||
|
||||
// sign the vote and publish on internalMsgQueue
|
||||
func (cs *ConsensusState) signAddVote(type_ types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
|
||||
// if we don't have a key or we're not in the validator set, do nothing
|
||||
if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetPubKey().Address()) {
|
||||
if cs.privValidator == nil { // the node does not have a key
|
||||
return nil
|
||||
}
|
||||
|
||||
if cs.privValidatorPubKey == nil {
|
||||
// Vote won't be signed, but it's not critical.
|
||||
cs.Logger.Error(fmt.Sprintf("signAddVote: %v", errPubKeyIsNotSet))
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the node is not in the validator set, do nothing.
|
||||
if !cs.Validators.HasAddress(cs.privValidatorPubKey.Address()) {
|
||||
return nil
|
||||
}
|
||||
vote, err := cs.signVote(type_, hash, header)
|
||||
@@ -1861,6 +1933,18 @@ func (cs *ConsensusState) signAddVote(type_ types.SignedMsgType, hash []byte, he
|
||||
return nil
|
||||
}
|
||||
|
||||
// updatePrivValidatorPubKey gets the private validator public key and
|
||||
// memoizes it. This func returns an error if the private validator is not
|
||||
// responding or responds with an error.
|
||||
func (cs *ConsensusState) updatePrivValidatorPubKey() error {
|
||||
if cs.privValidator == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
cs.privValidatorPubKey = cs.privValidator.GetPubKey()
|
||||
return nil
|
||||
}
|
||||
|
||||
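Several hunks above follow one pattern: fetch the pubkey from the private validator once (in SetPrivValidator and again after each committed block), memoize it in privValidatorPubKey, and have the hot paths (enterPropose, signVote, recordMetrics, tryAddVote) read the cached value and fail soft with errPubKeyIsNotSet when it is missing. A minimal, self-contained sketch of that caching shape; the types here are simplified stand-ins, not the actual tendermint interfaces.

```go
package main

import (
	"errors"
	"fmt"
	"sync"
)

// signer is a stand-in for types.PrivValidator; GetPubKey may be a remote
// call (e.g. to an HSM), which is why the result is cached.
type signer interface {
	GetPubKey() (string, error)
}

type state struct {
	mtx    sync.RWMutex
	priv   signer
	pubKey string // memoized for the duration of one block
}

var errPubKeyIsNotSet = errors.New("pubkey is not set")

func (s *state) SetPrivValidator(priv signer) {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	s.priv = priv
	if err := s.updatePubKey(); err != nil {
		fmt.Println("can't get private validator pubkey:", err)
	}
}

// updatePubKey refetches and caches the pubkey; called again after each
// committed block in case the key pair changed.
func (s *state) updatePubKey() error {
	if s.priv == nil {
		return nil
	}
	pk, err := s.priv.GetPubKey()
	if err != nil {
		return err
	}
	s.pubKey = pk
	return nil
}

// address is what hot paths use instead of calling GetPubKey every time.
func (s *state) address() (string, error) {
	s.mtx.RLock()
	defer s.mtx.RUnlock()
	if s.pubKey == "" {
		return "", errPubKeyIsNotSet
	}
	return s.pubKey, nil
}

func main() {
	var s state
	s.SetPrivValidator(nil)
	if _, err := s.address(); err != nil {
		fmt.Println(err) // pubkey is not set
	}
}
```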
//---------------------------------------------------------
|
||||
|
||||
func CompareHRS(h1 int64, r1 int, s1 cstypes.RoundStepType, h2 int64, r2 int, s2 cstypes.RoundStepType) int {
|
||||
|
||||
@@ -203,7 +203,9 @@ func (wal *baseWAL) WriteSync(msg WALMessage) error {
|
||||
}
|
||||
|
||||
if err := wal.FlushAndSync(); err != nil {
|
||||
wal.Logger.Error("WriteSync failed to flush consensus wal. WARNING: may result in creating alternative proposals / votes for the current height iff the node restarted",
|
||||
wal.Logger.Error(
|
||||
`WriteSync failed to flush consensus wal. WARNING: may result in creating alternative proposals / votes
|
||||
for the current height iff the node restarted`,
|
||||
"err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -8,7 +8,7 @@ import (
|
||||
"io"
|
||||
"math/big"
|
||||
|
||||
"golang.org/x/crypto/ripemd160"
|
||||
"golang.org/x/crypto/ripemd160" // nolint
|
||||
|
||||
secp256k1 "github.com/btcsuite/btcd/btcec"
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
module.exports = {
|
||||
theme: "cosmos",
|
||||
theme: 'cosmos',
|
||||
title: 'Tendermint Core',
|
||||
// locales: {
|
||||
// "/": {
|
||||
// lang: "en-US"
|
||||
@@ -10,136 +11,161 @@ module.exports = {
|
||||
// },
|
||||
base: process.env.VUEPRESS_BASE,
|
||||
themeConfig: {
|
||||
docsRepo: "tendermint/tendermint",
|
||||
repo: 'tendermint/tendermint',
|
||||
docsRepo: 'tendermint/tendermint',
|
||||
docsDir: 'docs',
|
||||
editLinks: true,
|
||||
docsDir: "docs",
|
||||
logo: "/logo.svg",
|
||||
label: "core",
|
||||
gutter: {
|
||||
title: "Help & Support",
|
||||
editLink: true,
|
||||
chat: {
|
||||
title: "Riot Chat",
|
||||
text: "Chat with Tendermint developers on Riot Chat.",
|
||||
url: "https://riot.im/app/#/room/#tendermint:matrix.org",
|
||||
bg: "#222"
|
||||
},
|
||||
forum: {
|
||||
title: "Tendermint Forum",
|
||||
text: "Join the Tendermint forum to learn more",
|
||||
url: "https://forum.cosmos.network/c/tendermint",
|
||||
bg: "#0B7E0B",
|
||||
logo: "tendermint"
|
||||
},
|
||||
github: {
|
||||
title: "Found an Issue?",
|
||||
text: "Help us improve this page by suggesting edits on GitHub."
|
||||
}
|
||||
label: 'core',
|
||||
algolia: {
|
||||
id: "BH4D9OD16A",
|
||||
key: "59f0e2deb984aa9cdf2b3a5fd24ac501",
|
||||
index: "tendermint"
|
||||
},
|
||||
footer: {
|
||||
logo: "/logo-bw.svg",
|
||||
textLink: {
|
||||
text: "tendermint.com",
|
||||
url: "https://tendermint.com"
|
||||
versions: [
|
||||
{
|
||||
"label": "v0.32",
|
||||
"key": "v0.32"
|
||||
},
|
||||
services: [
|
||||
{
|
||||
"label": "v0.33",
|
||||
"key": "v0.33"
|
||||
},
|
||||
{
|
||||
"label": "v0.34",
|
||||
"key": "v0.34"
|
||||
},
|
||||
{
|
||||
"label": "master",
|
||||
"key": "master"
|
||||
}
|
||||
],
|
||||
topbar: {
|
||||
banner: false,
|
||||
},
|
||||
sidebar: {
|
||||
auto: true,
|
||||
nav: [
|
||||
{
|
||||
service: "medium",
|
||||
url: "https://medium.com/@tendermint"
|
||||
},
|
||||
{
|
||||
service: "twitter",
|
||||
url: "https://twitter.com/tendermint_team"
|
||||
},
|
||||
{
|
||||
service: "linkedin",
|
||||
url: "https://www.linkedin.com/company/tendermint/"
|
||||
},
|
||||
{
|
||||
service: "reddit",
|
||||
url: "https://reddit.com/r/cosmosnetwork"
|
||||
},
|
||||
{
|
||||
service: "telegram",
|
||||
url: "https://t.me/cosmosproject"
|
||||
},
|
||||
{
|
||||
service: "youtube",
|
||||
url: "https://www.youtube.com/c/CosmosProject"
|
||||
}
|
||||
],
|
||||
smallprint:
|
||||
"The development of the Tendermint project is led primarily by Tendermint Inc., the for-profit entity which also maintains this website. Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit.",
|
||||
links: [
|
||||
{
|
||||
title: "Documentation",
|
||||
title: 'Resources',
|
||||
children: [
|
||||
{
|
||||
title: "Cosmos SDK",
|
||||
url: "https://cosmos.network/docs"
|
||||
title: 'Developer Sessions',
|
||||
path: '/DEV_SESSIONS.html'
|
||||
},
|
||||
{
|
||||
title: "Cosmos Hub",
|
||||
url: "https://hub.cosmos.network/"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
title: "Community",
|
||||
children: [
|
||||
{
|
||||
title: "Tendermint blog",
|
||||
url: "https://medium.com/@tendermint"
|
||||
title: 'RPC',
|
||||
path: 'https://docs.tendermint.com/master/rpc/',
|
||||
static: true
|
||||
},
|
||||
{
|
||||
title: "Forum",
|
||||
url: "https://forum.cosmos.network/c/tendermint"
|
||||
},
|
||||
{
|
||||
title: "Chat",
|
||||
url: "https://riot.im/app/#/room/#tendermint:matrix.org"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
title: "Contributing",
|
||||
children: [
|
||||
{
|
||||
title: "Contributing to the docs",
|
||||
url: "https://github.com/tendermint/tendermint"
|
||||
},
|
||||
{
|
||||
title: "Source code on GitHub",
|
||||
url: "https://github.com/tendermint/tendermint"
|
||||
},
|
||||
{
|
||||
title: "Careers at Tendermint",
|
||||
url: "https://tendermint.com/careers"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
sidebar: [
|
||||
gutter: {
|
||||
title: 'Help & Support',
|
||||
editLink: true,
|
||||
forum: {
|
||||
title: 'Tendermint Forum',
|
||||
text: 'Join the Tendermint forum to learn more',
|
||||
url: 'https://forum.cosmos.network/c/tendermint',
|
||||
bg: '#0B7E0B',
|
||||
logo: 'tendermint'
|
||||
},
|
||||
github: {
|
||||
title: 'Found an Issue?',
|
||||
text: 'Help us improve this page by suggesting edits on GitHub.'
|
||||
}
|
||||
},
|
||||
footer: {
|
||||
question: {
|
||||
text: 'Chat with Tendermint developers in <a href=\'https://discord.gg/vcExX9T\' target=\'_blank\'>Discord</a> or reach out on the <a href=\'https://forum.cosmos.network/c/tendermint\' target=\'_blank\'>Tendermint Forum</a> to learn more.'
|
||||
},
|
||||
logo: '/logo-bw.svg',
|
||||
textLink: {
|
||||
text: 'tendermint.com',
|
||||
url: 'https://tendermint.com'
|
||||
},
|
||||
services: [
|
||||
{
|
||||
service: 'medium',
|
||||
url: 'https://medium.com/@tendermint'
|
||||
},
|
||||
{
|
||||
service: 'twitter',
|
||||
url: 'https://twitter.com/tendermint_team'
|
||||
},
|
||||
{
|
||||
service: 'linkedin',
|
||||
url: 'https://www.linkedin.com/company/tendermint/'
|
||||
},
|
||||
{
|
||||
service: 'reddit',
|
||||
url: 'https://reddit.com/r/cosmosnetwork'
|
||||
},
|
||||
{
|
||||
service: 'telegram',
|
||||
url: 'https://t.me/cosmosproject'
|
||||
},
|
||||
{
|
||||
service: 'youtube',
|
||||
url: 'https://www.youtube.com/c/CosmosProject'
|
||||
}
|
||||
],
|
||||
smallprint:
|
||||
'The development of Tendermint Core is led primarily by [Interchain GmbH](https://interchain.berlin/). Funding for this development comes primarily from the Interchain Foundation, a Swiss non-profit. The Tendermint trademark is owned by Tendermint Inc, the for-profit entity that also maintains this website.',
|
||||
links: [
|
||||
{
|
||||
title: 'Documentation',
|
||||
children: [
|
||||
{
|
||||
title: 'Cosmos SDK',
|
||||
url: 'https://docs.cosmos.network'
|
||||
},
|
||||
{
|
||||
title: 'Cosmos Hub',
|
||||
url: 'https://hub.cosmos.network'
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
title: 'Community',
|
||||
children: [
|
||||
{
|
||||
title: 'Tendermint blog',
|
||||
url: 'https://medium.com/@tendermint'
|
||||
},
|
||||
{
|
||||
title: 'Forum',
|
||||
url: 'https://forum.cosmos.network/c/tendermint'
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
title: 'Contributing',
|
||||
children: [
|
||||
{
|
||||
title: 'Contributing to the docs',
|
||||
url: 'https://github.com/tendermint/tendermint'
|
||||
},
|
||||
{
|
||||
title: 'Source code on GitHub',
|
||||
url: 'https://github.com/tendermint/tendermint'
|
||||
},
|
||||
{
|
||||
title: 'Careers at Tendermint',
|
||||
url: 'https://tendermint.com/careers'
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
},
|
||||
plugins: [
|
||||
[
|
||||
'@vuepress/google-analytics',
|
||||
{
|
||||
title: "Resources",
|
||||
children: [
|
||||
{
|
||||
title: "Developer Sessions",
|
||||
path: "/DEV_SESSIONS.html"
|
||||
},
|
||||
{
|
||||
title: "RPC",
|
||||
path: "/rpc/",
|
||||
static: true
|
||||
}
|
||||
]
|
||||
ga: 'UA-51029217-11'
|
||||
}
|
||||
]
|
||||
},
|
||||
markdown: {
|
||||
anchor: {
|
||||
permalinkSymbol: ""
|
||||
}
|
||||
}
|
||||
]
|
||||
};
|
||||
|
||||
docs/package-lock.json (generated, 7112 lines): diff suppressed because it is too large
@@ -4,7 +4,7 @@
|
||||
"description": "Welcome to the Tendermint Core documentation!",
|
||||
"main": "index.js",
|
||||
"dependencies": {
|
||||
"vuepress-theme-cosmos": "^1.0.54"
|
||||
"vuepress-theme-cosmos": "^1.0.180"
|
||||
},
|
||||
"devDependencies": {},
|
||||
"scripts": {
|
||||
|
||||
@@ -18,34 +18,37 @@ Listen address can be changed in the config file (see
|
||||
|
||||
The following metrics are available:
|
||||
|
||||
| **Name** | **Type** | **Since** | **Tags** | **Description** |
|
||||
| --------------------------------------- | --------- | --------- | -------------- | --------------------------------------------------------------- |
|
||||
| consensus\_height | Gauge | 0.21.0 | | Height of the chain |
|
||||
| consensus\_validators | Gauge | 0.21.0 | | Number of validators |
|
||||
| consensus\_validators\_power | Gauge | 0.21.0 | | Total voting power of all validators |
|
||||
| consensus\_missing\_validators | Gauge | 0.21.0 | | Number of validators who did not sign |
|
||||
| consensus\_missing\_validators\_power | Gauge | 0.21.0 | | Total voting power of the missing validators |
|
||||
| consensus\_byzantine\_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign |
|
||||
| consensus\_byzantine\_validators\_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators |
|
||||
| consensus\_block\_interval\_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds |
|
||||
| consensus\_rounds | Gauge | 0.21.0 | | Number of rounds |
|
||||
| consensus\_num\_txs | Gauge | 0.21.0 | | Number of transactions |
|
||||
| consensus\_block\_parts | counter | on dev | peer\_id | number of blockparts transmitted by peer |
|
||||
| consensus\_latest\_block\_height | gauge | on dev | | /status sync\_info number |
|
||||
| consensus\_fast\_syncing | gauge | on dev | | either 0 (not fast syncing) or 1 (syncing) |
|
||||
| consensus\_total\_txs | Gauge | 0.21.0 | | Total number of transactions committed |
|
||||
| consensus\_block\_size\_bytes | Gauge | 0.21.0 | | Block size in bytes |
|
||||
| p2p\_peers | Gauge | 0.21.0 | | Number of peers node's connected to |
|
||||
| p2p\_peer\_receive\_bytes\_total | counter | on dev | peer\_id, chID | number of bytes per channel received from a given peer |
|
||||
| p2p\_peer\_send\_bytes\_total | counter | on dev | peer\_id, chID | number of bytes per channel sent to a given peer |
|
||||
| p2p\_peer\_pending\_send\_bytes | gauge | on dev | peer\_id | number of pending bytes to be sent to a given peer |
|
||||
| p2p\_num\_txs | gauge | on dev | peer\_id | number of transactions submitted by each peer\_id |
|
||||
| p2p\_pending\_send\_bytes | gauge | on dev | peer\_id | amount of data pending to be sent to peer |
|
||||
| mempool\_size | Gauge | 0.21.0 | | Number of uncommitted transactions |
|
||||
| mempool\_tx\_size\_bytes | histogram | on dev | | transaction sizes in bytes |
|
||||
| mempool\_failed\_txs | counter | on dev | | number of failed transactions |
|
||||
| mempool\_recheck\_times | counter | on dev | | number of transactions rechecked in the mempool |
|
||||
| state\_block\_processing\_time | histogram | on dev | | time between BeginBlock and EndBlock in ms |
|
||||
| **Name** | **Type** | **Since** | **Tags** | **Description** |
|
||||
| -------------------------------------- | --------- | --------- | ------------- | ---------------------------------------------------------------------- |
|
||||
| consensus_height | Gauge | 0.21.0 | | Height of the chain |
|
||||
| consensus_validators | Gauge | 0.21.0 | | Number of validators |
|
||||
| consensus_validators_power | Gauge | 0.21.0 | | Total voting power of all validators |
|
||||
| consensus_validator_power | Gauge | 0.33.0 | | Voting power of the node if in the validator set |
|
||||
| consensus_validator_last_signed_height | Gauge | 0.33.0 | | Last height the node signed a block, if the node is a validator |
|
||||
| consensus_validator_missed_blocks | Gauge | 0.33.0 | | Total amount of blocks missed for the node, if the node is a validator |
|
||||
| consensus_missing_validators | Gauge | 0.21.0 | | Number of validators who did not sign |
|
||||
| consensus_missing_validators_power | Gauge | 0.21.0 | | Total voting power of the missing validators |
|
||||
| consensus_byzantine_validators | Gauge | 0.21.0 | | Number of validators who tried to double sign |
|
||||
| consensus_byzantine_validators_power | Gauge | 0.21.0 | | Total voting power of the byzantine validators |
|
||||
| consensus_block_interval_seconds | Histogram | 0.21.0 | | Time between this and last block (Block.Header.Time) in seconds |
|
||||
| consensus_rounds | Gauge | 0.21.0 | | Number of rounds |
|
||||
| consensus_num_txs | Gauge | 0.21.0 | | Number of transactions |
|
||||
| consensus_total_txs | Gauge | 0.21.0 | | Total number of transactions committed |
|
||||
| consensus_block_parts | counter | on dev | peer_id | number of blockparts transmitted by peer |
|
||||
| consensus_latest_block_height | gauge | on dev | | /status sync_info number |
|
||||
| consensus_fast_syncing | gauge | on dev | | either 0 (not fast syncing) or 1 (syncing) |
|
||||
| consensus_block_size_bytes | Gauge | 0.21.0 | | Block size in bytes |
|
||||
| p2p_peers | Gauge | 0.21.0 | | Number of peers node's connected to |
|
||||
| p2p_peer_receive_bytes_total | counter | on dev | peer_id, chID | number of bytes per channel received from a given peer |
|
||||
| p2p_peer_send_bytes_total | counter | on dev | peer_id, chID | number of bytes per channel sent to a given peer |
|
||||
| p2p_peer_pending_send_bytes | gauge | on dev | peer_id | number of pending bytes to be sent to a given peer |
|
||||
| p2p_num_txs | gauge | on dev | peer_id | number of transactions submitted by each peer_id |
|
||||
| p2p_pending_send_bytes | gauge | on dev | peer_id | amount of data pending to be sent to peer |
|
||||
| mempool_size | Gauge | 0.21.0 | | Number of uncommitted transactions |
|
||||
| mempool_tx_size_bytes | histogram | on dev | | transaction sizes in bytes |
|
||||
| mempool_failed_txs | counter | on dev | | number of failed transactions |
|
||||
| mempool_recheck_times | counter | on dev | | number of transactions rechecked in the mempool |
|
||||
| state_block_processing_time | histogram | on dev | | time between BeginBlock and EndBlock in ms |
|
||||
|
||||
## Useful queries
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ func NewEvidenceReactor(evpool *EvidencePool) *EvidenceReactor {
|
||||
evR := &EvidenceReactor{
|
||||
evpool: evpool,
|
||||
}
|
||||
evR.BaseReactor = *p2p.NewBaseReactor("EvidenceReactor", evR)
|
||||
evR.BaseReactor = *p2p.NewBaseReactor("Evidence", evR)
|
||||
return evR
|
||||
}
|
||||
|
||||
@@ -65,7 +65,7 @@ func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
|
||||
func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := decodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
||||
evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
|
||||
evR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -60,10 +60,10 @@ func TestContains(t *testing.T) {
|
||||
func BenchmarkCMapHas(b *testing.B) {
|
||||
m := NewCMap()
|
||||
for i := 0; i < 1000; i++ {
|
||||
m.Set(string(i), i)
|
||||
m.Set(fmt.Sprint(i), i)
|
||||
}
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
m.Has(string(i))
|
||||
m.Has(fmt.Sprint(i))
|
||||
}
|
||||
}
|
||||
|
||||
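Both this benchmark fix and the pubsub and upnp hunks further down replace string(i) with fmt.Sprint(i) (or fmt.Sprint(r.StatusCode)): converting an int to string interprets the value as a Unicode code point rather than formatting its digits. A small illustration:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	i := 65
	fmt.Println(string(rune(i))) // "A" (code point 65), which is what string(i) used to produce
	fmt.Println(fmt.Sprint(i))   // "65"
	fmt.Println(strconv.Itoa(i)) // "65"
}
```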
@@ -39,6 +39,7 @@ import (
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
|
||||
cmn "github.com/tendermint/tendermint/libs/common"
|
||||
)
|
||||
|
||||
|
||||
@@ -382,7 +382,7 @@ func benchmarkNClients(n int, b *testing.B) {
|
||||
s.PublishWithEvents(
|
||||
ctx,
|
||||
"Gamora",
|
||||
map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {string(i)}},
|
||||
map[string][]string{"abci.Account.Owner": {"Ivan"}, "abci.Invoices.Number": {fmt.Sprint(i)}},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -678,9 +678,9 @@ func (cache *mapTxCache) Push(tx types.Tx) bool {
|
||||
|
||||
if cache.list.Len() >= cache.size {
|
||||
popped := cache.list.Front()
|
||||
poppedTxHash := popped.Value.([sha256.Size]byte)
|
||||
delete(cache.map_, poppedTxHash)
|
||||
if popped != nil {
|
||||
poppedTxHash := popped.Value.([sha256.Size]byte)
|
||||
delete(cache.map_, poppedTxHash)
|
||||
cache.list.Remove(popped)
|
||||
}
|
||||
}
|
||||
|
||||
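The mempool hunk above adds a nil guard before evicting from the tx cache's list. The cache itself is a map keyed by the tx hash plus a FIFO list for eviction order; a rough standalone sketch of that structure (simplified, not the actual CListMempool code):

```go
package main

import (
	"container/list"
	"crypto/sha256"
	"fmt"
	"sync"
)

// txCache remembers recently seen transactions so they are not re-added.
type txCache struct {
	mtx  sync.Mutex
	size int
	m    map[[sha256.Size]byte]struct{}
	list *list.List // of [sha256.Size]byte, oldest at the front
}

func newTxCache(size int) *txCache {
	return &txCache{
		size: size,
		m:    make(map[[sha256.Size]byte]struct{}, size),
		list: list.New(),
	}
}

// Push returns false if the tx was already cached, true otherwise.
func (c *txCache) Push(tx []byte) bool {
	c.mtx.Lock()
	defer c.mtx.Unlock()

	h := sha256.Sum256(tx)
	if _, ok := c.m[h]; ok {
		return false
	}

	// Evict the oldest entry when full; guard against an empty list,
	// mirroring the nil check added in the diff.
	if c.list.Len() >= c.size {
		if front := c.list.Front(); front != nil {
			delete(c.m, front.Value.([sha256.Size]byte))
			c.list.Remove(front)
		}
	}

	c.m[h] = struct{}{}
	c.list.PushBack(h)
	return true
}

func main() {
	c := newTxCache(2)
	fmt.Println(c.Push([]byte("tx1"))) // true
	fmt.Println(c.Push([]byte("tx1"))) // false (already seen)
	fmt.Println(c.Push([]byte("tx2"))) // true
	fmt.Println(c.Push([]byte("tx3"))) // true, evicts tx1
}
```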
@@ -47,7 +47,7 @@ type mempoolIDs struct {
|
||||
activeIDs map[uint16]struct{} // used to check if a given peerID key is used, the value doesn't matter
|
||||
}
|
||||
|
||||
// Reserve searches for the next unused ID and assignes it to the
|
||||
// Reserve searches for the next unused ID and assigns it to the
|
||||
// peer.
|
||||
func (ids *mempoolIDs) ReserveForPeer(peer p2p.Peer) {
|
||||
ids.mtx.Lock()
|
||||
@@ -110,10 +110,16 @@ func NewReactor(config *cfg.MempoolConfig, mempool *CListMempool) *Reactor {
|
||||
mempool: mempool,
|
||||
ids: newMempoolIDs(),
|
||||
}
|
||||
memR.BaseReactor = *p2p.NewBaseReactor("Reactor", memR)
|
||||
memR.BaseReactor = *p2p.NewBaseReactor("Mempool", memR)
|
||||
return memR
|
||||
}
|
||||
|
||||
// InitPeer implements Reactor by creating a state for the peer.
|
||||
func (memR *Reactor) InitPeer(peer p2p.Peer) p2p.Peer {
|
||||
memR.ids.ReserveForPeer(peer)
|
||||
return peer
|
||||
}
|
||||
|
||||
// SetLogger sets the Logger on the reactor and the underlying mempool.
|
||||
func (memR *Reactor) SetLogger(l log.Logger) {
|
||||
memR.Logger = l
|
||||
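InitPeer now reserves a mempool peer ID before Receive can run for that peer, while AddPeer only starts the broadcast routine. The reservation itself is a small "next unused uint16" allocator; a hedged sketch of that data structure, with a made-up maxActiveIDs and illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

const maxActiveIDs = 5 // illustrative only; the real limit is much larger

type peerIDs struct {
	mtx       sync.Mutex
	nextID    uint16
	activeIDs map[uint16]struct{}
}

func newPeerIDs() *peerIDs {
	return &peerIDs{
		nextID:    1, // 0 is reserved for unconnected peers
		activeIDs: map[uint16]struct{}{0: {}},
	}
}

// Reserve hands out the smallest unused ID, starting the search at nextID.
func (p *peerIDs) Reserve() uint16 {
	p.mtx.Lock()
	defer p.mtx.Unlock()

	if len(p.activeIDs) >= maxActiveIDs {
		panic("node has maximum active IDs; cannot reserve a new one")
	}
	for {
		if _, used := p.activeIDs[p.nextID]; !used {
			id := p.nextID
			p.activeIDs[id] = struct{}{}
			p.nextID++
			return id
		}
		p.nextID++
	}
}

// Release frees an ID when the peer disconnects.
func (p *peerIDs) Release(id uint16) {
	p.mtx.Lock()
	defer p.mtx.Unlock()
	delete(p.activeIDs, id)
}

func main() {
	ids := newPeerIDs()
	a, b := ids.Reserve(), ids.Reserve()
	fmt.Println(a, b) // 1 2
	ids.Release(a)
	fmt.Println(ids.Reserve()) // 3 (the search continues from nextID, not from 1)
}
```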
@@ -142,7 +148,6 @@ func (memR *Reactor) GetChannels() []*p2p.ChannelDescriptor {
|
||||
// AddPeer implements Reactor.
|
||||
// It starts a broadcast routine ensuring all txs are forwarded to the given peer.
|
||||
func (memR *Reactor) AddPeer(peer p2p.Peer) {
|
||||
memR.ids.ReserveForPeer(peer)
|
||||
go memR.broadcastTxRoutine(peer)
|
||||
}
|
||||
|
||||
@@ -157,7 +162,7 @@ func (memR *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
|
||||
func (memR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
|
||||
msg, err := memR.decodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
||||
memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
|
||||
memR.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -223,3 +223,21 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
|
||||
ids.ReserveForPeer(peer)
|
||||
})
|
||||
}
|
||||
|
||||
func TestDontExhaustMaxActiveIDs(t *testing.T) {
|
||||
config := cfg.TestConfig()
|
||||
const N = 1
|
||||
reactors := makeAndConnectReactors(config, N)
|
||||
defer func() {
|
||||
for _, r := range reactors {
|
||||
r.Stop()
|
||||
}
|
||||
}()
|
||||
reactor := reactors[0]
|
||||
|
||||
for i := 0; i < maxActiveIDs+1; i++ {
|
||||
peer := mock.NewPeer(nil)
|
||||
reactor.Receive(MempoolChannel, peer, []byte{0x1, 0x2, 0x3})
|
||||
reactor.AddPeer(peer)
|
||||
}
|
||||
}
|
||||
|
||||
node/node.go (19 lines changed)
@@ -467,6 +467,11 @@ func createTransport(
|
||||
}
|
||||
|
||||
p2p.MultiplexTransportConnFilters(connFilters...)(transport)
|
||||
|
||||
// Limit the number of incoming connections.
|
||||
max := config.P2P.MaxNumInboundPeers
|
||||
p2p.MultiplexTransportMaxIncomingConnections(max)(transport)
|
||||
|
||||
return transport, peerFilters
|
||||
}
|
||||
|
||||
@@ -1187,7 +1192,19 @@ func createAndStartPrivValidatorSocketClient(
|
||||
return nil, errors.Wrap(err, "failed to start private validator")
|
||||
}
|
||||
|
||||
return pvsc, nil
|
||||
// try to get a pubkey from the private validator the first time
|
||||
pubKey := pvsc.GetPubKey()
|
||||
if pubKey == nil {
|
||||
return nil, errors.New("could not retrieve public key from private validator")
|
||||
}
|
||||
|
||||
const (
|
||||
retries = 50 // 50 * 100ms = 5s total
|
||||
timeout = 100 * time.Millisecond
|
||||
)
|
||||
pvscWithRetries := privval.NewRetrySignerClient(pvsc, retries, timeout)
|
||||
|
||||
return pvscWithRetries, nil
|
||||
}
|
||||
|
||||
// splitAndTrimEmpty slices s into all subslices separated by sep and returns a
|
||||
|
||||
@@ -158,7 +158,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
|
||||
|
||||
n, err := DefaultNewNode(config, log.TestingLogger())
|
||||
require.NoError(t, err)
|
||||
assert.IsType(t, &privval.SignerClient{}, n.PrivValidator())
|
||||
assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
|
||||
}
|
||||
|
||||
// address without a protocol must result in error
|
||||
@@ -202,7 +202,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
|
||||
|
||||
n, err := DefaultNewNode(config, log.TestingLogger())
|
||||
require.NoError(t, err)
|
||||
assert.IsType(t, &privval.SignerClient{}, n.PrivValidator())
|
||||
assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
|
||||
}
|
||||
|
||||
// testFreeAddr claims a free port so we don't block on listener being ready.
|
||||
|
||||
@@ -380,7 +380,7 @@ func deriveSecretAndChallenge(
|
||||
copy(recvSecret[:], res[aeadKeySize:aeadKeySize*2])
|
||||
}
|
||||
|
||||
return
|
||||
return recvSecret, sendSecret, challenge
|
||||
}
|
||||
|
||||
// computeDHSecret computes a Diffie-Hellman shared secret key
|
||||
|
||||
@@ -313,21 +313,43 @@ var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
|
||||
var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
|
||||
var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
|
||||
var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
|
||||
var (
|
||||
// onionCatNet defines the IPv6 address block used to support Tor.
|
||||
// bitcoind encodes a .onion address as a 16 byte number by decoding the
|
||||
// address prior to the .onion (i.e. the key hash) base32 into a ten
|
||||
// byte number. It then stores the first 6 bytes of the address as
|
||||
// 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
|
||||
//
|
||||
// This is the same range used by OnionCat, which is part of the
|
||||
// RFC4193 unique local IPv6 range.
|
||||
//
|
||||
// In summary the format is:
|
||||
// { magic 6 bytes, 10 bytes base32 decode of key hash }
|
||||
onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
|
||||
)
|
||||
|
||||
// ipNet returns a net.IPNet struct given the passed IP address string, number
|
||||
// of one bits to include at the start of the mask, and the total number of bits
|
||||
// for the mask.
|
||||
func ipNet(ip string, ones, bits int) net.IPNet {
|
||||
return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)}
|
||||
}
|
||||
|
||||
func (na *NetAddress) RFC1918() bool {
|
||||
return rfc1918_10.Contains(na.IP) ||
|
||||
rfc1918_192.Contains(na.IP) ||
|
||||
rfc1918_172.Contains(na.IP)
|
||||
}
|
||||
func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) }
|
||||
func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) }
|
||||
func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) }
|
||||
|
||||
func removeProtocolIfDefined(addr string) string {
|
||||
if strings.Contains(addr, "://") {
|
||||
|
||||
@@ -768,31 +768,36 @@ func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) int {
|
||||
}
|
||||
|
||||
// Return a string representing the network group of this address.
|
||||
// This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string
|
||||
// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string
|
||||
// "local" for a local address and the string "unroutable" for an unroutable
|
||||
// address.
|
||||
func (a *addrBook) groupKey(na *p2p.NetAddress) string {
|
||||
if a.routabilityStrict && na.Local() {
|
||||
return groupKeyFor(na, a.routabilityStrict)
|
||||
}
|
||||
|
||||
func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string {
|
||||
if routabilityStrict && na.Local() {
|
||||
return "local"
|
||||
}
|
||||
if a.routabilityStrict && !na.Routable() {
|
||||
if routabilityStrict && !na.Routable() {
|
||||
return "unroutable"
|
||||
}
|
||||
|
||||
if ipv4 := na.IP.To4(); ipv4 != nil {
|
||||
return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String()
|
||||
return na.IP.Mask(net.CIDRMask(16, 32)).String()
|
||||
}
|
||||
|
||||
if na.RFC6145() || na.RFC6052() {
|
||||
// last four bytes are the ip address
|
||||
ip := na.IP[12:16]
|
||||
return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
|
||||
return ip.Mask(net.CIDRMask(16, 32)).String()
|
||||
}
|
||||
|
||||
if na.RFC3964() {
|
||||
ip := na.IP[2:7]
|
||||
return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
|
||||
|
||||
ip := na.IP[2:6]
|
||||
return ip.Mask(net.CIDRMask(16, 32)).String()
|
||||
}
|
||||
|
||||
if na.RFC4380() {
|
||||
// teredo tunnels have the last 4 bytes as the v4 address XOR
|
||||
// 0xff.
|
||||
@@ -800,20 +805,24 @@ func (a *addrBook) groupKey(na *p2p.NetAddress) string {
|
||||
for i, byte := range na.IP[12:16] {
|
||||
ip[i] = byte ^ 0xff
|
||||
}
|
||||
return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String()
|
||||
return ip.Mask(net.CIDRMask(16, 32)).String()
|
||||
}
|
||||
|
||||
if na.OnionCatTor() {
|
||||
// group is keyed off the first 4 bits of the actual onion key.
|
||||
return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1))
|
||||
}
|
||||
|
||||
// OK, so now we know ourselves to be an IPv6 address.
|
||||
// bitcoind uses /32 for everything, except for Hurricane Electric's
|
||||
// (he.net) IP range, which it uses /36 for.
|
||||
bits := 32
|
||||
heNet := &net.IPNet{IP: net.ParseIP("2001:470::"),
|
||||
Mask: net.CIDRMask(32, 128)}
|
||||
heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)}
|
||||
if heNet.Contains(na.IP) {
|
||||
bits = 36
|
||||
}
|
||||
|
||||
return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String()
|
||||
ipv6Mask := net.CIDRMask(bits, 128)
|
||||
return na.IP.Mask(ipv6Mask).String()
|
||||
}
|
||||
|
||||
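The groupKey refactor swaps (&net.IPNet{...}).String() for IP.Mask(...).String(), so an IPv4 /16 group now renders as plain "12.1.0.0" (matching the new test table below) rather than CIDR notation, and the logic is exposed as groupKeyFor for both strict and non-strict routability. A quick standalone check of the masking calls used here:

```go
package main

import (
	"fmt"
	"net"
)

func main() {
	// IPv4: group by /16.
	ip4 := net.ParseIP("12.1.2.3")
	fmt.Println(ip4.Mask(net.CIDRMask(16, 32)).String()) // 12.1.0.0

	// IPv6: group by /32 (or /36 for the he.net range).
	ip6 := net.ParseIP("2602:100::1")
	fmt.Println(ip6.Mask(net.CIDRMask(32, 128)).String()) // 2602:100::

	heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)}
	fmt.Println(heNet.Contains(net.ParseIP("2001:470:1f10:a1::2"))) // true
}
```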
// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes.
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"net"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
@@ -545,6 +546,73 @@ func TestMultipleAddrBookAddressSelection(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestAddrBookGroupKey(t *testing.T) {
|
||||
// non-strict routability
|
||||
testCases := []struct {
|
||||
name string
|
||||
ip string
|
||||
expKey string
|
||||
}{
|
||||
// IPv4 normal.
|
||||
{"ipv4 normal class a", "12.1.2.3", "12.1.0.0"},
|
||||
{"ipv4 normal class b", "173.1.2.3", "173.1.0.0"},
|
||||
{"ipv4 normal class c", "196.1.2.3", "196.1.0.0"},
|
||||
|
||||
// IPv6/IPv4 translations.
|
||||
{"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"},
|
||||
{"ipv6 rfc4380 toredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"},
|
||||
{"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"},
|
||||
{"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"},
|
||||
|
||||
// Tor.
|
||||
{"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"},
|
||||
{"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"},
|
||||
{"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"},
|
||||
|
||||
// IPv6 normal.
|
||||
{"ipv6 normal", "2602:100::1", "2602:100::"},
|
||||
{"ipv6 normal 2", "2602:0100::1234", "2602:100::"},
|
||||
{"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"},
|
||||
{"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
nip := net.ParseIP(tc.ip)
|
||||
key := groupKeyFor(p2p.NewNetAddressIPPort(nip, 26656), false)
|
||||
assert.Equal(t, tc.expKey, key, "#%d", i)
|
||||
}
|
||||
|
||||
// strict routability
|
||||
testCases = []struct {
|
||||
name string
|
||||
ip string
|
||||
expKey string
|
||||
}{
|
||||
// Local addresses.
|
||||
{"ipv4 localhost", "127.0.0.1", "local"},
|
||||
{"ipv6 localhost", "::1", "local"},
|
||||
{"ipv4 zero", "0.0.0.0", "local"},
|
||||
{"ipv4 first octet zero", "0.1.2.3", "local"},
|
||||
|
||||
// Unroutable addresses.
|
||||
{"ipv4 invalid bcast", "255.255.255.255", "unroutable"},
|
||||
{"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"},
|
||||
{"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"},
|
||||
{"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"},
|
||||
{"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"},
|
||||
{"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"},
|
||||
{"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"},
|
||||
{"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"},
|
||||
{"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"},
|
||||
}
|
||||
|
||||
for i, tc := range testCases {
|
||||
nip := net.ParseIP(tc.ip)
|
||||
key := groupKeyFor(p2p.NewNetAddressIPPort(nip, 26656), true)
|
||||
assert.Equal(t, tc.expKey, key, "#%d", i)
|
||||
}
|
||||
}
|
||||
|
||||
func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) {
|
||||
nOld, nNew := countOldAndNewAddrsInSelection(addrs, book)
|
||||
assert.Equal(t, m, nOld, "old addresses")
|
||||
|
||||
@@ -130,7 +130,7 @@ func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor {
|
||||
lastReceivedRequests: cmn.NewCMap(),
|
||||
crawlPeerInfos: make(map[p2p.ID]crawlPeerInfo),
|
||||
}
|
||||
r.BaseReactor = *p2p.NewBaseReactor("PEXReactor", r)
|
||||
r.BaseReactor = *p2p.NewBaseReactor("PEX", r)
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -227,7 +227,7 @@ func (r *PEXReactor) logErrAddrBook(err error) {
|
||||
func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) {
|
||||
msg, err := decodeMsg(msgBytes)
|
||||
if err != nil {
|
||||
r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
|
||||
r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err)
|
||||
r.Switch.StopPeerForError(src, err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/net/netutil"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/p2p/conn"
|
||||
@@ -122,11 +123,18 @@ func MultiplexTransportResolver(resolver IPResolver) MultiplexTransportOption {
|
||||
return func(mt *MultiplexTransport) { mt.resolver = resolver }
|
||||
}
|
||||
|
||||
// MultiplexTransportMaxIncomingConnections sets the maximum number of
|
||||
// simultaneous connections (incoming). Default: 0 (unlimited)
|
||||
func MultiplexTransportMaxIncomingConnections(n int) MultiplexTransportOption {
|
||||
return func(mt *MultiplexTransport) { mt.maxIncomingConnections = n }
|
||||
}
|
||||
|
||||
// MultiplexTransport accepts and dials tcp connections and upgrades them to
|
||||
// multiplexed peers.
|
||||
type MultiplexTransport struct {
|
||||
netAddr NetAddress
|
||||
listener net.Listener
|
||||
netAddr NetAddress
|
||||
listener net.Listener
|
||||
maxIncomingConnections int // see MaxIncomingConnections
|
||||
|
||||
acceptc chan accept
|
||||
closec chan struct{}
|
||||
@@ -240,6 +248,10 @@ func (mt *MultiplexTransport) Listen(addr NetAddress) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if mt.maxIncomingConnections > 0 {
|
||||
ln = netutil.LimitListener(ln, mt.maxIncomingConnections)
|
||||
}
|
||||
|
||||
mt.netAddr = addr
|
||||
mt.listener = ln
|
||||
|
||||
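MaxIncomingConnections is enforced by wrapping the TCP listener with golang.org/x/net/netutil.LimitListener, which holds off further Accepts until an active connection closes. A minimal standalone sketch of the same wrapping on a plain HTTP server (the port and limit here are arbitrary):

```go
package main

import (
	"log"
	"net"
	"net/http"

	"golang.org/x/net/netutil"
)

func main() {
	ln, err := net.Listen("tcp", "127.0.0.1:8080")
	if err != nil {
		log.Fatal(err)
	}

	// At most 10 connections are served concurrently; additional dials
	// sit in the backlog until a slot frees up.
	ln = netutil.LimitListener(ln, 10)

	log.Fatal(http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok\n"))
	})))
}
```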
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"math/rand"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@@ -134,6 +135,50 @@ func TestTransportMultiplexConnFilterTimeout(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransportMultiplexMaxIncomingConnections(t *testing.T) {
|
||||
mt := newMultiplexTransport(
|
||||
emptyNodeInfo(),
|
||||
NodeKey{
|
||||
PrivKey: ed25519.GenPrivKey(),
|
||||
},
|
||||
)
|
||||
id := mt.nodeKey.ID()
|
||||
|
||||
MultiplexTransportMaxIncomingConnections(0)(mt)
|
||||
|
||||
addr, err := NewNetAddressString(IDAddressString(id, "127.0.0.1:0"))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := mt.Listen(*addr); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
errc := make(chan error)
|
||||
|
||||
go func() {
|
||||
addr := NewNetAddress(id, mt.listener.Addr())
|
||||
|
||||
_, err := addr.Dial()
|
||||
if err != nil {
|
||||
errc <- err
|
||||
return
|
||||
}
|
||||
|
||||
close(errc)
|
||||
}()
|
||||
|
||||
if err := <-errc; err != nil {
|
||||
t.Errorf("connection failed: %v", err)
|
||||
}
|
||||
|
||||
_, err = mt.Accept(peerConfig{})
|
||||
if err == nil || !strings.Contains(err.Error(), "connection reset by peer") {
|
||||
t.Errorf("expected connection reset by peer error, got %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTransportMultiplexAcceptMultiple(t *testing.T) {
|
||||
mt := testSetupMultiplexTransport(t)
|
||||
laddr := NewNetAddress(mt.nodeKey.ID(), mt.listener.Addr())
|
||||
@@ -210,6 +255,7 @@ func testDialer(dialAddr NetAddress, errc chan error) {
|
||||
}
|
||||
|
||||
func TestTransportMultiplexAcceptNonBlocking(t *testing.T) {
|
||||
t.Skip("")
|
||||
mt := testSetupMultiplexTransport(t)
|
||||
|
||||
var (
|
||||
|
||||
@@ -209,7 +209,7 @@ func getServiceURL(rootURL string) (url, urnDomain string, err error) {
|
||||
defer r.Body.Close() // nolint: errcheck
|
||||
|
||||
if r.StatusCode >= 400 {
|
||||
err = errors.New(string(r.StatusCode))
|
||||
err = errors.New(fmt.Sprint(r.StatusCode))
|
||||
return
|
||||
}
|
||||
var root Root
|
||||
@@ -378,7 +378,7 @@ func (n *upnpNAT) AddPortMapping(
|
||||
// fmt.Println(string(body), err)
|
||||
mappedExternalPort = externalPort
|
||||
_ = response
|
||||
return
|
||||
return mappedExternalPort, err
|
||||
}
|
||||
|
||||
func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) {
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
package privval
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// EndpointTimeoutError occurs when endpoint times out.
|
||||
type EndpointTimeoutError struct{}
|
||||
|
||||
// Implement the net.Error interface.
|
||||
@@ -13,15 +15,15 @@ func (e EndpointTimeoutError) Temporary() bool { return true }
|
||||
|
||||
// Socket errors.
|
||||
var (
|
||||
ErrUnexpectedResponse = fmt.Errorf("received unexpected response")
|
||||
ErrNoConnection = fmt.Errorf("endpoint is not connected")
|
||||
ErrConnectionTimeout = EndpointTimeoutError{}
|
||||
|
||||
ErrReadTimeout = fmt.Errorf("endpoint read timed out")
|
||||
ErrWriteTimeout = fmt.Errorf("endpoint write timed out")
|
||||
ErrNoConnection = errors.New("endpoint is not connected")
|
||||
ErrReadTimeout = errors.New("endpoint read timed out")
|
||||
ErrUnexpectedResponse = errors.New("empty response")
|
||||
ErrWriteTimeout = errors.New("endpoint write timed out")
|
||||
)
|
||||
|
||||
// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply.
|
||||
// RemoteSignerError allows (remote) validators to include meaningful error
|
||||
// descriptions in their reply.
|
||||
type RemoteSignerError struct {
|
||||
// TODO(ismail): create an enum of known errors
|
||||
Code int
|
||||
|
||||
privval/retry_signer_client.go (new file, 85 lines)
@@ -0,0 +1,85 @@
|
||||
package privval
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"time"
|
||||
|
||||
"github.com/tendermint/tendermint/crypto"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// RetrySignerClient wraps SignerClient adding retry for each operation (except
|
||||
// Ping) w/ a timeout.
|
||||
type RetrySignerClient struct {
|
||||
next *SignerClient
|
||||
retries int
|
||||
timeout time.Duration
|
||||
}
|
||||
|
||||
// NewRetrySignerClient returns RetrySignerClient. If +retries+ is 0, the
|
||||
// client will be retrying each operation indefinitely.
|
||||
func NewRetrySignerClient(sc *SignerClient, retries int, timeout time.Duration) *RetrySignerClient {
|
||||
return &RetrySignerClient{sc, retries, timeout}
|
||||
}
|
||||
|
||||
var _ types.PrivValidator = (*RetrySignerClient)(nil)
|
||||
|
||||
func (sc *RetrySignerClient) Close() error {
|
||||
return sc.next.Close()
|
||||
}
|
||||
|
||||
func (sc *RetrySignerClient) IsConnected() bool {
|
||||
return sc.next.IsConnected()
|
||||
}
|
||||
|
||||
func (sc *RetrySignerClient) WaitForConnection(maxWait time.Duration) error {
|
||||
return sc.next.WaitForConnection(maxWait)
|
||||
}
|
||||
|
||||
//--------------------------------------------------------
|
||||
// Implement PrivValidator
|
||||
|
||||
func (sc *RetrySignerClient) Ping() error {
|
||||
return sc.next.Ping()
|
||||
}
|
||||
|
||||
func (sc *RetrySignerClient) GetPubKey() crypto.PubKey {
|
||||
for i := 0; i < sc.retries || sc.retries == 0; i++ {
|
||||
pk := sc.next.GetPubKey()
|
||||
if pk != nil {
|
||||
return pk
|
||||
}
|
||||
time.Sleep(sc.timeout)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sc *RetrySignerClient) SignVote(chainID string, vote *types.Vote) error {
|
||||
for i := 0; i < sc.retries || sc.retries == 0; i++ {
|
||||
err := sc.next.SignVote(chainID, vote)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
// If remote signer errors, we don't retry.
|
||||
if _, ok := err.(*RemoteSignerError); ok {
|
||||
return err
|
||||
}
|
||||
time.Sleep(sc.timeout)
|
||||
}
|
||||
return errors.New("exhausted all attempts to sign vote")
|
||||
}
|
||||
|
||||
func (sc *RetrySignerClient) SignProposal(chainID string, proposal *types.Proposal) error {
|
||||
for i := 0; i < sc.retries || sc.retries == 0; i++ {
|
||||
err := sc.next.SignProposal(chainID, proposal)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
// If remote signer errors, we don't retry.
|
||||
if _, ok := err.(*RemoteSignerError); ok {
|
||||
return err
|
||||
}
|
||||
time.Sleep(sc.timeout)
|
||||
}
|
||||
return errors.New("exhausted all attempts to sign proposal")
|
||||
}
|
||||
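RetrySignerClient's loops use the convention that retries == 0 means retry forever, and they treat RemoteSignerError as terminal, since there is no point retrying a signer that deliberately refused. The same shape, extracted into a generic helper as a hedged sketch (names are illustrative, not part of the tendermint API):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// terminalError marks failures that should not be retried, playing the role
// RemoteSignerError plays in the diff above.
type terminalError struct{ msg string }

func (e *terminalError) Error() string { return e.msg }

// retry runs op up to `retries` times (forever when retries == 0), sleeping
// `wait` between attempts, and stops early on a terminal error.
func retry(retries int, wait time.Duration, op func() error) error {
	var err error
	for i := 0; i < retries || retries == 0; i++ {
		if err = op(); err == nil {
			return nil
		}
		var te *terminalError
		if errors.As(err, &te) {
			return err
		}
		time.Sleep(wait)
	}
	return fmt.Errorf("exhausted all attempts: %w", err)
}

func main() {
	attempts := 0
	err := retry(3, 10*time.Millisecond, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}
```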
@@ -50,7 +50,6 @@ func (sc *SignerClient) WaitForConnection(maxWait time.Duration) error {
|
||||
// Ping sends a ping request to the remote signer
|
||||
func (sc *SignerClient) Ping() error {
|
||||
response, err := sc.endpoint.SendRequest(&PingRequest{})
|
||||
|
||||
if err != nil {
|
||||
sc.endpoint.Logger.Error("SignerClient::Ping", "err", err)
|
||||
return nil
|
||||
@@ -58,7 +57,6 @@ func (sc *SignerClient) Ping() error {
|
||||
|
||||
_, ok := response.(*PingResponse)
|
||||
if !ok {
|
||||
sc.endpoint.Logger.Error("SignerClient::Ping", "err", "response != PingResponse")
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -91,16 +89,13 @@ func (sc *SignerClient) GetPubKey() crypto.PubKey {
|
||||
func (sc *SignerClient) SignVote(chainID string, vote *types.Vote) error {
|
||||
response, err := sc.endpoint.SendRequest(&SignVoteRequest{Vote: vote})
|
||||
if err != nil {
|
||||
sc.endpoint.Logger.Error("SignerClient::SignVote", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
resp, ok := response.(*SignedVoteResponse)
|
||||
if !ok {
|
||||
sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", "response != SignedVoteResponse")
|
||||
return ErrUnexpectedResponse
|
||||
}
|
||||
|
||||
if resp.Error != nil {
|
||||
return resp.Error
|
||||
}
|
||||
@@ -113,13 +108,11 @@ func (sc *SignerClient) SignVote(chainID string, vote *types.Vote) error {
|
||||
func (sc *SignerClient) SignProposal(chainID string, proposal *types.Proposal) error {
|
||||
response, err := sc.endpoint.SendRequest(&SignProposalRequest{Proposal: proposal})
|
||||
if err != nil {
|
||||
sc.endpoint.Logger.Error("SignerClient::SignProposal", "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
resp, ok := response.(*SignedProposalResponse)
|
||||
if !ok {
|
||||
sc.endpoint.Logger.Error("SignerClient::SignProposal", "err", "response != SignedProposalResponse")
|
||||
return ErrUnexpectedResponse
|
||||
}
|
||||
if resp.Error != nil {
|
||||
|
||||
@@ -250,8 +250,7 @@ func TestSignerUnexpectedResponse(t *testing.T) {
|
||||
|
||||
ts := time.Now()
|
||||
want := &types.Vote{Timestamp: ts, Type: types.PrecommitType}
|
||||
|
||||
e := tc.signerClient.SignVote(tc.chainID, want)
|
||||
assert.EqualError(t, e, "received unexpected response")
|
||||
assert.EqualError(t, e, "empty response")
|
||||
}
|
||||
}
|
||||
|
||||
@@ -15,24 +15,26 @@ const (
|
||||
// SignerServiceEndpointOption sets an optional parameter on the SignerDialerEndpoint.
|
||||
type SignerServiceEndpointOption func(*SignerDialerEndpoint)
|
||||
|
||||
// SignerDialerEndpointTimeoutReadWrite sets the read and write timeout for connections
|
||||
// from external signing processes.
|
||||
// SignerDialerEndpointTimeoutReadWrite sets the read and write timeout for
|
||||
// connections from client processes.
|
||||
func SignerDialerEndpointTimeoutReadWrite(timeout time.Duration) SignerServiceEndpointOption {
|
||||
return func(ss *SignerDialerEndpoint) { ss.timeoutReadWrite = timeout }
|
||||
}
|
||||
|
||||
// SignerDialerEndpointConnRetries sets the amount of attempted retries to acceptNewConnection.
|
||||
// SignerDialerEndpointConnRetries sets the amount of attempted retries to
|
||||
// acceptNewConnection.
|
||||
func SignerDialerEndpointConnRetries(retries int) SignerServiceEndpointOption {
|
||||
return func(ss *SignerDialerEndpoint) { ss.maxConnRetries = retries }
|
||||
}
|
||||
|
||||
// SignerDialerEndpointRetryWaitInterval sets the retry wait interval to a custom value
|
||||
// SignerDialerEndpointRetryWaitInterval sets the retry wait interval to a
|
||||
// custom value.
|
||||
func SignerDialerEndpointRetryWaitInterval(interval time.Duration) SignerServiceEndpointOption {
|
||||
return func(ss *SignerDialerEndpoint) { ss.retryWait = interval }
|
||||
}
|
||||
|
||||
// SignerDialerEndpoint dials using its dialer and responds to any
|
||||
// signature requests using its privVal.
|
||||
// SignerDialerEndpoint dials using its dialer and responds to any signature
|
||||
// requests using its privVal.
|
||||
type SignerDialerEndpoint struct {
|
||||
signerEndpoint
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultTimeoutReadWriteSeconds = 3
|
||||
defaultTimeoutReadWriteSeconds = 5
|
||||
)
|
||||
|
||||
type signerEndpoint struct {
|
||||
|
||||
@@ -10,11 +10,22 @@ import (
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
)
|
||||
|
||||
// SignerValidatorEndpointOption sets an optional parameter on the SocketVal.
|
||||
type SignerValidatorEndpointOption func(*SignerListenerEndpoint)
|
||||
// SignerListenerEndpointOption sets an optional parameter on the SignerListenerEndpoint.
|
||||
type SignerListenerEndpointOption func(*SignerListenerEndpoint)
|
||||
|
||||
// SignerListenerEndpoint listens for an external process to dial in
|
||||
// and keeps the connection alive by dropping and reconnecting
|
||||
// SignerListenerEndpointTimeoutReadWrite sets the read and write timeout for
|
||||
// connections from external signing processes.
|
||||
//
|
||||
// Default: 5s
|
||||
func SignerListenerEndpointTimeoutReadWrite(timeout time.Duration) SignerListenerEndpointOption {
|
||||
return func(sl *SignerListenerEndpoint) { sl.signerEndpoint.timeoutReadWrite = timeout }
|
||||
}
|
||||
|
||||
// SignerListenerEndpoint listens for an external process to dial in and keeps
|
||||
// the connection alive by dropping and reconnecting.
|
||||
//
|
||||
// The process will send pings every ~3s (read/write timeout * 2/3) to keep the
|
||||
// connection alive.
|
||||
type SignerListenerEndpoint struct {
|
||||
signerEndpoint
|
||||
|
||||
@@ -24,6 +35,7 @@ type SignerListenerEndpoint struct {
|
||||
|
||||
timeoutAccept time.Duration
|
||||
pingTimer *time.Ticker
|
||||
pingInterval time.Duration
|
||||
|
||||
instanceMtx sync.Mutex // Ensures instance public methods access, i.e. SendRequest
|
||||
}
|
||||
@@ -32,15 +44,21 @@ type SignerListenerEndpoint struct {
|
||||
func NewSignerListenerEndpoint(
|
||||
logger log.Logger,
|
||||
listener net.Listener,
|
||||
options ...SignerListenerEndpointOption,
|
||||
) *SignerListenerEndpoint {
|
||||
sc := &SignerListenerEndpoint{
|
||||
sl := &SignerListenerEndpoint{
|
||||
listener: listener,
|
||||
timeoutAccept: defaultTimeoutAcceptSeconds * time.Second,
|
||||
}
|
||||
|
||||
sc.BaseService = *cmn.NewBaseService(logger, "SignerListenerEndpoint", sc)
|
||||
sc.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second
|
||||
return sc
|
||||
sl.BaseService = *cmn.NewBaseService(logger, "SignerListenerEndpoint", sl)
|
||||
sl.signerEndpoint.timeoutReadWrite = defaultTimeoutReadWriteSeconds * time.Second
|
||||
|
||||
for _, optionFunc := range options {
|
||||
optionFunc(sl)
|
||||
}
|
||||
|
||||
return sl
|
||||
}
|
||||
|
||||
// OnStart implements cmn.Service.
|
||||
@@ -48,7 +66,9 @@ func (sl *SignerListenerEndpoint) OnStart() error {
|
||||
sl.connectRequestCh = make(chan struct{})
|
||||
sl.connectionAvailableCh = make(chan net.Conn)
|
||||
|
||||
sl.pingTimer = time.NewTicker(defaultPingPeriodMilliseconds * time.Millisecond)
|
||||
// NOTE: ping timeout must be less than read/write timeout
|
||||
sl.pingInterval = time.Duration(sl.signerEndpoint.timeoutReadWrite.Milliseconds()*2/3) * time.Millisecond
|
||||
sl.pingTimer = time.NewTicker(sl.pingInterval)
|
||||
|
||||
go sl.serviceLoop()
|
||||
go sl.pingLoop()
|
||||
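The ping period is now derived from the read/write timeout (two thirds of it) instead of a fixed 100ms, so with the new 5s default the listener pings roughly every 3.3s, comfortably inside the timeout. The arithmetic, in isolation:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	timeoutReadWrite := 5 * time.Second

	// Same computation as the diff: go through milliseconds...
	pingInterval := time.Duration(timeoutReadWrite.Milliseconds()*2/3) * time.Millisecond
	fmt.Println(pingInterval) // 3.333s

	// ...which is equivalent, up to sub-millisecond truncation, to:
	fmt.Println(timeoutReadWrite * 2 / 3) // 3.333333333s
}
```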
@@ -116,6 +136,7 @@ func (sl *SignerListenerEndpoint) ensureConnection(maxWait time.Duration) error
    }

    // block until connected or timeout
    sl.Logger.Info("SignerListener: Blocking for connection")
    sl.triggerConnect()
    err := sl.WaitConnection(sl.connectionAvailableCh, maxWait)
    if err != nil {

@@ -155,7 +155,11 @@ func newSignerListenerEndpoint(logger log.Logger, addr string, timeoutReadWrite
        listener = tcpLn
    }

    return NewSignerListenerEndpoint(logger, listener)
    return NewSignerListenerEndpoint(
        logger,
        listener,
        SignerListenerEndpointTimeoutReadWrite(testTimeoutReadWrite),
    )
}

func startListenerEndpointAsync(t *testing.T, sle *SignerListenerEndpoint, endpointIsOpenCh chan struct{}) {

@@ -9,8 +9,7 @@ import (
)

const (
    defaultTimeoutAcceptSeconds   = 3
    defaultPingPeriodMilliseconds = 100
    defaultTimeoutAcceptSeconds = 3
)

// timeoutError can be used to check if an error returned from the netp package

@@ -28,6 +28,55 @@ const (
    protoTCP = "tcp"
)

// Parsed URL structure
type parsedURL struct {
    url.URL
}

// Parse URL and set defaults
func newParsedURL(remoteAddr string) (*parsedURL, error) {
    u, err := url.Parse(remoteAddr)
    if err != nil {
        return nil, err
    }

    // default to tcp if nothing specified
    if u.Scheme == "" {
        u.Scheme = protoTCP
    }

    return &parsedURL{*u}, nil
}

// Change protocol to HTTP for unknown protocols and TCP protocol - useful for RPC connections
func (u *parsedURL) SetDefaultSchemeHTTP() {
    // protocol to use for http operations, to support both http and https
    switch u.Scheme {
    case protoHTTP, protoHTTPS, protoWS, protoWSS:
        // known protocols not changed
    default:
        // default to http for unknown protocols (ex. tcp)
        u.Scheme = protoHTTP
    }
}

// Get full address without the protocol - useful for Dialer connections
func (u parsedURL) GetHostWithPath() string {
    // Remove protocol, userinfo and # fragment, assume opaque is empty
    return u.Host + u.EscapedPath()
}

// Get a trimmed address - useful for WS connections
func (u parsedURL) GetTrimmedHostWithPath() string {
    // replace / with . for http requests (kvstore domain)
    return strings.Replace(u.GetHostWithPath(), "/", ".", -1)
}

// Get a trimmed address with protocol - useful as address in RPC connections
func (u parsedURL) GetTrimmedURL() string {
    return u.Scheme + "://" + u.GetTrimmedHostWithPath()
}

// HTTPClient is a common interface for JSONRPCClient and URIClient.
type HTTPClient interface {
    Call(method string, params map[string]interface{}, result interface{}) (interface{}, error)
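To make the behaviour of these helpers concrete, here is a hedged, self-contained sketch that copies their logic so it compiles outside the rpcclient package; the remote address is illustrative:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

// Stand-in copy of the parsedURL helpers above, so this sketch runs on its own.
type parsedURL struct{ url.URL }

func newParsedURL(remoteAddr string) (*parsedURL, error) {
    u, err := url.Parse(remoteAddr)
    if err != nil {
        return nil, err
    }
    if u.Scheme == "" {
        u.Scheme = "tcp" // default to tcp if nothing specified
    }
    return &parsedURL{*u}, nil
}

func (u *parsedURL) SetDefaultSchemeHTTP() {
    switch u.Scheme {
    case "http", "https", "ws", "wss":
        // known protocols are kept
    default:
        u.Scheme = "http" // e.g. tcp is treated as http
    }
}

func (u parsedURL) GetHostWithPath() string { return u.Host + u.EscapedPath() }

func (u parsedURL) GetTrimmedURL() string {
    return u.Scheme + "://" + strings.Replace(u.GetHostWithPath(), "/", ".", -1)
}

func main() {
    u, err := newParsedURL("tcp://localhost:26657/websocket")
    if err != nil {
        panic(err)
    }
    u.SetDefaultSchemeHTTP()
    fmt.Println(u.GetHostWithPath()) // localhost:26657/websocket
    fmt.Println(u.GetTrimmedURL())   // http://localhost:26657.websocket
}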
@@ -35,62 +84,6 @@ type HTTPClient interface {
    SetCodec(*amino.Codec)
}

// protocol - client's protocol (for example, "http", "https", "wss", "ws", "tcp")
// trimmedS - rest of the address (for example, "192.0.2.1:25", "[2001:db8::1]:80") with "/" replaced with "."
func toClientAddrAndParse(remoteAddr string) (network string, trimmedS string, err error) {
    protocol, address, err := parseRemoteAddr(remoteAddr)
    if err != nil {
        return "", "", err
    }

    // protocol to use for http operations, to support both http and https
    var clientProtocol string
    // default to http for unknown protocols (ex. tcp)
    switch protocol {
    case protoHTTP, protoHTTPS, protoWS, protoWSS:
        clientProtocol = protocol
    default:
        clientProtocol = protoHTTP
    }

    // replace / with . for http requests (kvstore domain)
    trimmedAddress := strings.Replace(address, "/", ".", -1)
    return clientProtocol, trimmedAddress, nil
}

func toClientAddress(remoteAddr string) (string, error) {
    clientProtocol, trimmedAddress, err := toClientAddrAndParse(remoteAddr)
    if err != nil {
        return "", err
    }
    return clientProtocol + "://" + trimmedAddress, nil
}

// network - name of the network (for example, "tcp", "unix")
// s - rest of the address (for example, "192.0.2.1:25", "[2001:db8::1]:80")
// TODO: Deprecate support for IP:PORT or /path/to/socket
func parseRemoteAddr(remoteAddr string) (network string, s string, err error) {
    parts := strings.SplitN(remoteAddr, "://", 2)
    var protocol, address string
    switch {
    case len(parts) == 1:
        // default to tcp if nothing specified
        protocol, address = protoTCP, remoteAddr
    case len(parts) == 2:
        protocol, address = parts[0], parts[1]
    default:
        return "", "", fmt.Errorf("invalid addr: %s", remoteAddr)
    }

    // accept http(s) as an alias for tcp
    switch protocol {
    case protoHTTP, protoHTTPS:
        protocol = protoTCP
    }

    return protocol, address, nil
}

func makeErrorDialer(err error) func(string, string) (net.Conn, error) {
    return func(_ string, _ string) (net.Conn, error) {
        return nil, err

@@ -98,13 +91,21 @@ func makeErrorDialer(err error) func(string, string) (net.Conn, error) {
}

func makeHTTPDialer(remoteAddr string) func(string, string) (net.Conn, error) {
    protocol, address, err := parseRemoteAddr(remoteAddr)
    u, err := newParsedURL(remoteAddr)
    if err != nil {
        return makeErrorDialer(err)
    }

    protocol := u.Scheme

    // accept http(s) as an alias for tcp
    switch protocol {
    case protoHTTP, protoHTTPS:
        protocol = protoTCP
    }

    return func(proto, addr string) (net.Conn, error) {
        return net.Dial(protocol, address)
        return net.Dial(protocol, u.GetHostWithPath())
    }
}
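For reference, the dialer built above ignores the network and address it is handed and always dials the host parsed from remoteAddr, treating http and https as tcp. A stdlib-only sketch that mirrors that mapping; the address is illustrative and nothing needs to be listening:

package main

import (
    "fmt"
    "net"
    "net/url"
)

func main() {
    // Mirrors makeHTTPDialer: parse the remote, map http/https to tcp,
    // and dial host(+path).
    u, err := url.Parse("https://localhost:26657")
    if err != nil {
        panic(err)
    }

    network := u.Scheme
    switch network {
    case "http", "https":
        network = "tcp"
    }

    conn, err := net.Dial(network, u.Host+u.EscapedPath())
    if err != nil {
        fmt.Println("dial failed (nothing listening in this sketch):", err)
        return
    }
    defer conn.Close()
}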
@@ -142,10 +143,12 @@ type JSONRPCRequestBatch struct {

// JSONRPCClient takes params as a slice
type JSONRPCClient struct {
    address string
    client  *http.Client
    id      types.JSONRPCStringID
    cdc     *amino.Codec
    address  string
    username string
    password string
    client   *http.Client
    id       types.JSONRPCStringID
    cdc      *amino.Codec
}

// JSONRPCCaller implementers can facilitate calling the JSON RPC endpoint.

@@ -170,16 +173,24 @@ func NewJSONRPCClientWithHTTPClient(remote string, client *http.Client) *JSONRPC
        panic("nil http.Client provided")
    }

    clientAddress, err := toClientAddress(remote)
    parsedURL, err := newParsedURL(remote)
    if err != nil {
        panic(fmt.Sprintf("invalid remote %s: %s", remote, err))
    }

    parsedURL.SetDefaultSchemeHTTP()

    address := parsedURL.GetTrimmedURL()
    username := parsedURL.User.Username()
    password, _ := parsedURL.User.Password()

    return &JSONRPCClient{
        address: clientAddress,
        client:  client,
        id:      types.JSONRPCStringID("jsonrpc-client-" + cmn.RandStr(8)),
        cdc:     amino.NewCodec(),
        address:  address,
        username: username,
        password: password,
        client:   client,
        id:       types.JSONRPCStringID("jsonrpc-client-" + cmn.RandStr(8)),
        cdc:      amino.NewCodec(),
    }
}
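In short, basic-auth credentials can now be embedded in the remote URL and are captured at construction time. A hedged usage sketch; the URL and credentials are made up:

package main

import (
    "net/http"

    rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)

func main() {
    // The user:pass userinfo is extracted here and later sent as an
    // HTTP basic auth header on every request.
    c := rpcclient.NewJSONRPCClientWithHTTPClient(
        "https://user:pass@localhost:26657",
        http.DefaultClient,
    )
    _ = c
}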
@@ -195,7 +206,15 @@ func (c *JSONRPCClient) Call(method string, params map[string]interface{}, resul
        return nil, err
    }
    requestBuf := bytes.NewBuffer(requestBytes)
    httpResponse, err := c.client.Post(c.address, "text/json", requestBuf)
    httpRequest, err := http.NewRequest(http.MethodPost, c.address, requestBuf)
    if err != nil {
        return nil, err
    }
    httpRequest.Header.Set("Content-Type", "text/json")
    if c.username != "" || c.password != "" {
        httpRequest.SetBasicAuth(c.username, c.password)
    }
    httpResponse, err := c.client.Do(httpRequest)
    if err != nil {
        return nil, err
    }

@@ -228,7 +247,15 @@ func (c *JSONRPCClient) sendBatch(requests []*jsonRPCBufferedRequest) ([]interfa
    if err != nil {
        return nil, err
    }
    httpResponse, err := c.client.Post(c.address, "text/json", bytes.NewBuffer(requestBytes))
    httpRequest, err := http.NewRequest(http.MethodPost, c.address, bytes.NewBuffer(requestBytes))
    if err != nil {
        return nil, err
    }
    httpRequest.Header.Set("Content-Type", "text/json")
    if c.username != "" || c.password != "" {
        httpRequest.SetBasicAuth(c.username, c.password)
    }
    httpResponse, err := c.client.Do(httpRequest)
    if err != nil {
        return nil, err
    }
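Both request paths rely on net/http's standard basic-auth support; a small standalone sketch of what SetBasicAuth actually puts on the wire (values are illustrative):

package main

import (
    "fmt"
    "net/http"
)

func main() {
    // SetBasicAuth base64-encodes "username:password" into the Authorization
    // header, which is all the server sees of the credentials.
    req, err := http.NewRequest(http.MethodPost, "http://localhost:26657", nil)
    if err != nil {
        panic(err)
    }
    req.SetBasicAuth("user", "pass")
    fmt.Println(req.Header.Get("Authorization")) // Basic dXNlcjpwYXNz
}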
@@ -315,12 +342,15 @@ type URIClient struct {

// The function panics if the provided remote is invalid.
func NewURIClient(remote string) *URIClient {
    clientAddress, err := toClientAddress(remote)
    parsedURL, err := newParsedURL(remote)
    if err != nil {
        panic(fmt.Sprintf("invalid remote %s: %s", remote, err))
    }

    parsedURL.SetDefaultSchemeHTTP()

    return &URIClient{
        address: clientAddress,
        address: parsedURL.GetTrimmedURL(),
        client:  DefaultHTTPClient(remote),
        cdc:     amino.NewCodec(),
    }
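A brief usage sketch of the normalization URIClient now gets; the remote address is illustrative, and since the stored address is unexported the sketch only constructs the client:

package main

import (
    "fmt"

    rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)

func main() {
    // A tcp:// (or schemeless) remote is normalized to an http:// address
    // internally before being stored on the client.
    c := rpcclient.NewURIClient("tcp://localhost:26657")
    fmt.Printf("%T\n", c) // *rpcclient.URIClient
}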
rpc/lib/client/http_client_test.go (new file, 22 lines)
@@ -0,0 +1,22 @@
package rpcclient

import (
    "testing"

    "github.com/stretchr/testify/require"
)

func TestHTTPClientMakeHTTPDialer(t *testing.T) {
    remote := []string{"https://foo-bar.com:80", "http://foo-bar.net:80", "https://user:pass@foo-bar.net:80"}

    for _, f := range remote {
        u, err := newParsedURL(f)
        require.NoError(t, err)
        dialFn := makeHTTPDialer(f)

        addr, err := dialFn(u.Scheme, u.GetHostWithPath())
        require.NoError(t, err)
        require.NotNil(t, addr)
    }
}

@@ -27,7 +27,7 @@ const (

// WSClient is a WebSocket client. The methods of WSClient are safe for use by
// multiple goroutines.
type WSClient struct { // nolint: maligned
type WSClient struct {
    conn *websocket.Conn
    cdc  *amino.Codec

@@ -80,18 +80,18 @@ type WSClient struct { // nolint: maligned
// pong wait time. The endpoint argument must begin with a `/`.
// The function panics if the provided address is invalid.
func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSClient {
    protocol, addr, err := toClientAddrAndParse(remoteAddr)
    parsedURL, err := newParsedURL(remoteAddr)
    if err != nil {
        panic(fmt.Sprintf("invalid remote %s: %s", remoteAddr, err))
    }
    // default to ws protocol, unless wss is explicitly specified
    if protocol != "wss" {
        protocol = "ws"
    if parsedURL.Scheme != protoWSS {
        parsedURL.Scheme = protoWS
    }

    c := &WSClient{
        cdc:      amino.NewCodec(),
        Address:  addr,
        Address:  parsedURL.GetTrimmedHostWithPath(),
        Dialer:   makeHTTPDialer(remoteAddr),
        Endpoint: endpoint,
        PingPongLatencyTimer: metrics.NewTimer(),

@@ -100,7 +100,7 @@ func NewWSClient(remoteAddr, endpoint string, options ...func(*WSClient)) *WSCli
        readWait:   defaultReadWait,
        writeWait:  defaultWriteWait,
        pingPeriod: defaultPingPeriod,
        protocol:   protocol,
        protocol:   parsedURL.Scheme,
    }
    c.BaseService = *cmn.NewBaseService(nil, "WSClient", c)
    for _, option := range options {
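With the parsedURL-based construction above, the scheme falls back to "ws" unless wss is given explicitly, and the trimmed host becomes the client's Address. A hedged usage sketch; the remote and endpoint are illustrative, and no connection is attempted until the client is started:

package main

import (
    rpcclient "github.com/tendermint/tendermint/rpc/lib/client"
)

func main() {
    // "tcp://..." is not wss, so the client will speak plain ws;
    // only an explicit wss:// remote keeps TLS.
    c := rpcclient.NewWSClient("tcp://localhost:26657", "/websocket")
    _ = c // c.Address is "localhost:26657"; the unexported protocol field is "ws"
}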
@@ -64,7 +64,8 @@ func TestWSClientReconnectsAfterReadFailure(t *testing.T) {
    s := httptest.NewServer(h)
    defer s.Close()

    c := startClient(t, s.Listener.Addr().String())
    // https://github.com/golang/go/issues/19297#issuecomment-282651469
    c := startClient(t, "//"+s.Listener.Addr().String())
    defer c.Stop()

    wg.Add(1)

@@ -96,7 +97,8 @@ func TestWSClientReconnectsAfterWriteFailure(t *testing.T) {
    h := &myHandler{}
    s := httptest.NewServer(h)

    c := startClient(t, s.Listener.Addr().String())
    // https://github.com/golang/go/issues/19297#issuecomment-282651469
    c := startClient(t, "//"+s.Listener.Addr().String())
    defer c.Stop()

    wg.Add(2)

@@ -124,7 +126,8 @@ func TestWSClientReconnectFailure(t *testing.T) {
    h := &myHandler{}
    s := httptest.NewServer(h)

    c := startClient(t, s.Listener.Addr().String())
    // https://github.com/golang/go/issues/19297#issuecomment-282651469
    c := startClient(t, "//"+s.Listener.Addr().String())
    defer c.Stop()

    go func() {

@@ -173,7 +176,7 @@ func TestWSClientReconnectFailure(t *testing.T) {
func TestNotBlockingOnStop(t *testing.T) {
    timeout := 2 * time.Second
    s := httptest.NewServer(&myHandler{})
    c := startClient(t, s.Listener.Addr().String())
    c := startClient(t, "//"+s.Listener.Addr().String())
    c.Call(context.Background(), "a", make(map[string]interface{}))
    // Let the readRoutine get around to blocking
    time.Sleep(time.Second)
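The "//" prefix added in these tests works around how url.Parse treats a bare host:port, which would otherwise make newParsedURL (and hence startClient) fail. A standalone sketch of the difference; this is standard library behaviour, not part of the diff:

package main

import (
    "fmt"
    "net/url"
)

func main() {
    // A bare host:port is rejected: the colon makes the first path segment invalid.
    _, err := url.Parse("127.0.0.1:46657")
    fmt.Println(err) // ... first path segment in URL cannot contain colon

    // Prefixing "//" marks it as an authority, so the host survives intact.
    u, err := url.Parse("//127.0.0.1:46657")
    fmt.Println(u.Host, err) // 127.0.0.1:46657 <nil>
}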
@@ -36,11 +36,11 @@ function getCode() {

# build grpc client if needed
if [[ "$GRPC_BROADCAST_TX" != "" ]]; then
    if [ -f grpc_client ]; then
        rm grpc_client
    if [ -f test/app/grpc_client ]; then
        rm test/app/grpc_client
    fi
    echo "... building grpc_client"
    go build -mod=readonly -o grpc_client grpc_client.go
    go build -mod=readonly -o test/app/grpc_client test/app/grpc_client.go
fi

function sendTx() {

@@ -59,7 +59,7 @@ function sendTx() {

    RESPONSE=$(echo "$RESPONSE" | jq '.result')
else
    RESPONSE=$(./grpc_client "$TX")
    RESPONSE=$(./test/app/grpc_client "$TX")
    IS_ERR=false
    ERROR=""
fi

@@ -22,7 +22,7 @@ function kvstore_over_socket(){
    sleep 5

    echo "running test"
    bash kvstore_test.sh "KVStore over Socket"
    bash test/app/kvstore_test.sh "KVStore over Socket"

    kill -9 $pid_kvstore $pid_tendermint
}

@@ -40,7 +40,7 @@ function kvstore_over_socket_reorder(){
    sleep 5

    echo "running test"
    bash kvstore_test.sh "KVStore over Socket"
    bash test/app/kvstore_test.sh "KVStore over Socket"

    kill -9 $pid_kvstore $pid_tendermint
}

@@ -57,7 +57,7 @@ function counter_over_socket() {
    sleep 5

    echo "running test"
    bash counter_test.sh "Counter over Socket"
    bash test/app/counter_test.sh "Counter over Socket"

    kill -9 $pid_counter $pid_tendermint
}

@@ -73,7 +73,7 @@ function counter_over_grpc() {
    sleep 5

    echo "running test"
    bash counter_test.sh "Counter over GRPC"
    bash test/app/counter_test.sh "Counter over GRPC"

    kill -9 $pid_counter $pid_tendermint
}

@@ -91,13 +91,11 @@ function counter_over_grpc_grpc() {
    sleep 5

    echo "running test"
    GRPC_BROADCAST_TX=true bash counter_test.sh "Counter over GRPC via GRPC BroadcastTx"
    GRPC_BROADCAST_TX=true bash test/app/counter_test.sh "Counter over GRPC via GRPC BroadcastTx"

    kill -9 $pid_counter $pid_tendermint
}

cd $GOPATH/src/github.com/tendermint/tendermint/test/app

case "$1" in
"kvstore_over_socket")
    kvstore_over_socket

@@ -126,4 +124,3 @@ case "$1" in
    echo ""
    counter_over_grpc_grpc
esac

@@ -1,8 +1,5 @@
FROM golang:1.13

# Add testing deps for curl
RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list

# Grab deps (jq, hexdump, xxd, killall)
RUN apt-get update && \
    apt-get install -y --no-install-recommends \

@@ -28,11 +25,11 @@ RUN make install_abci
RUN make install

RUN tendermint testnet \
    --config $REPO/test/docker/config-template.toml \
    --node-dir-prefix="mach" \
    --v=4 \
    --populate-persistent-peers=false \
    --o=$REPO/test/p2p/data
    --config $REPO/test/docker/config-template.toml \
    --node-dir-prefix="mach" \
    --v=4 \
    --populate-persistent-peers=false \
    --o=$REPO/test/p2p/data

# Now copy in the code
# NOTE: this will overwrite whatever is in vendor/

@@ -20,7 +20,8 @@ const (
    // Must be a string because scripts like dist.sh read this file.
    // XXX: Don't change the name of this variable or you will break
    // automation :)
    TMCoreSemVer = "0.32.7"

    TMCoreSemVer = "0.32.14"

    // ABCISemVer is the semantic version of the ABCI library
    ABCISemVer = "0.16.1"

@@ -33,6 +34,7 @@ type Protocol uint64
// Uint64 returns the Protocol version as a uint64,
// eg. for compatibility with ABCI types.
func (p Protocol) Uint64() uint64 {

    return uint64(p)
}