Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-12 15:52:50 +00:00)

Compare commits: v0.32.5 ... checked_in (1 commit)

| Author | SHA1 | Date |
|---|---|---|
|  | 79210e658d |  |
.circleci/config.yml
@@ -1,126 +1,238 @@
version: 2.1
version: 2

executors:
  golang:
    docker:
      - image: tendermintdev/docker-tendermint-build
    working_directory: /go/src/github.com/tendermint/tendermint
    environment:
      GOBIN: /tmp/bin
  release:
    machine: true
  docs:
    docker:
      - image: tendermintdev/jq_curl
    environment:
      AWS_REGION: us-east-1

commands:
  run_test:
    parameters:
      script_path:
        type: string
    steps:
      - attach_workspace:
          at: /tmp/bin
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - restore_cache:
          name: "Restore go modules cache"
          keys:
            - go-mod-v1-{{ checksum "go.sum" }}
      - run:
          name: "Running test"
          command: |
            bash << parameters.script_path >>
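Note: `run_test` above is a CircleCI 2.1 reusable command; jobs pass the `script_path` parameter and the command restores both caches before executing the script. A minimal sketch of a job invoking it (the job name and script path here are illustrative, not from the repo):

```yaml
jobs:
  test_example:            # hypothetical job name
    executor: golang
    steps:
      - run_test:
          script_path: test/example/test.sh   # hypothetical script path
```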
defaults: &defaults
  working_directory: /go/src/github.com/tendermint/tendermint
  docker:
    - image: circleci/golang:1.10.3
  environment:
    GOBIN: /tmp/workspace/bin
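Note: `defaults: &defaults` is a plain YAML anchor, and the `<<: *defaults` lines in the jobs below are YAML merge keys. A minimal sketch of what the merge expands to:

```yaml
lint:
  <<: *defaults   # copies working_directory, docker, and environment into the job
  steps:
    - checkout
```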

jobs:
  setup_dependencies:
    executor: golang
    <<: *defaults
    steps:
      - run: mkdir -p /tmp/workspace/bin
      - run: mkdir -p /tmp/workspace/profiles
      - checkout
      - restore_cache:
          name: "Restore go modules cache"
          keys:
            - go-mod-v1-{{ checksum "go.sum" }}
      - run:
          command: |
            mkdir -p /tmp/bin
      - run:
          name: Cache go modules
          command: make go-mod-cache
      - v3-pkg-cache
      - run:
          name: tools
          command: make tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: "Build binaries"
          command: make install install_abci
      - save_cache:
          name: "Save go modules cache"
          key: go-mod-v1-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"
      - save_cache:
          name: "Save source code cache"
          key: go-src-v1-{{ .Revision }}
          paths:
            - ".git"
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run:
          name: binaries
          command: |
            export PATH="$GOBIN:$PATH"
            make install install_abci
      - persist_to_workspace:
          root: "/tmp/bin"
          root: /tmp/workspace
          paths:
            - "."
            - bin
            - profiles
      - save_cache:
          key: v3-pkg-cache
          paths:
            - /go/pkg
      # - save_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      #     paths:
      #       - /go/src/github.com/tendermint/tendermint

  build_slate:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v3-pkg-cache
      # https://discuss.circleci.com/t/saving-cache-stopped-working-warning-skipping-this-step-disabled-in-configuration/24423/2
      # - restore_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      - checkout
      - run:
          name: tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

      - run:
          name: slate docs
          command: |
            set -ex
            export PATH="$GOBIN:$PATH"
            make build-slate

  lint:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v3-pkg-cache
      # - restore_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      - checkout
      - run:
          name: tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

      - run:
          name: metalinter
          command: |
            set -ex
            export PATH="$GOBIN:$PATH"
            make metalinter
      - run:
          name: check_dep
          command: |
            set -ex
            export PATH="$GOBIN:$PATH"
            make check_dep

  test_abci_apps:
    executor: golang
    <<: *defaults
    steps:
      - run_test:
          script_path: abci/tests/test_app/test.sh
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v3-pkg-cache
      # - restore_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      - checkout
      - run:
          name: tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

      # if this test fails, fix it and update the docs at:
      # https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.md
      - run:
          name: Run abci apps tests
          command: |
            export PATH="$GOBIN:$PATH"
            bash abci/tests/test_app/test.sh

  # if this test fails, fix it and update the docs at:
  # https://github.com/tendermint/tendermint/blob/develop/docs/abci-cli.md
  test_abci_cli:
    executor: golang
    <<: *defaults
    steps:
      - run_test:
          script_path: abci/tests/test_cli/test.sh
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v3-pkg-cache
      # - restore_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      - checkout
      - run:
          name: tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

      - run:
          name: Run abci-cli tests
          command: |
            export PATH="$GOBIN:$PATH"
            bash abci/tests/test_cli/test.sh

  test_apps:
    executor: golang
    <<: *defaults
    steps:
      - run_test:
          script_path: test/app/test.sh
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v3-pkg-cache
      # - restore_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      - checkout
      - run:
          name: tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

  test_persistence:
    executor: golang
    steps:
      - run_test:
          script_path: test/persist/test_failure_indices.sh
      - run: sudo apt-get update && sudo apt-get install -y --no-install-recommends bsdmainutils
      - run:
          name: Run tests
          command: bash test/app/test.sh

  test_cover:
    executor: golang
    <<: *defaults
    parallelism: 4
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
          key: v3-pkg-cache
      # - restore_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      - checkout
      - restore_cache:
          name: "Restore go module cache"
          keys:
            - go-mod-v2-{{ checksum "go.sum" }}
      - run:
          name: "Run tests"
          name: tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

      - run: mkdir -p /tmp/logs
      - run:
          name: Run tests
          command: |
            export VERSION="$(git describe --tags --long | sed 's/v\(.*\)/\1/')"
            export GO111MODULE=on
            mkdir -p /tmp/logs /tmp/workspace/profiles
            for pkg in $(go list github.com/tendermint/tendermint/... | circleci tests split --split-by=timings); do
              id=$(basename "$pkg")
              go test -v -timeout 5m -mod=readonly -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"

              GOCACHE=off go test -timeout 5m -race -coverprofile=/tmp/workspace/profiles/$id.out -covermode=atomic "$pkg" | tee "/tmp/logs/$id-$RANDOM.log"
            done
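Note: `circleci tests split --split-by=timings` shards the `go list` output across the job's four parallel containers using historical timing data. A rough local stand-in for the sharding (plain round-robin by container index, ignoring timings) could look like:

```bash
#!/usr/bin/env bash
# Round-robin package sharding; CIRCLE_NODE_TOTAL/CIRCLE_NODE_INDEX are set by CircleCI.
total="${CIRCLE_NODE_TOTAL:-4}"
index="${CIRCLE_NODE_INDEX:-0}"
i=0
for pkg in $(go list github.com/tendermint/tendermint/...); do
  if [ $((i % total)) -eq "$index" ]; then
    echo "$pkg"
  fi
  i=$((i + 1))
done
```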
      - persist_to_workspace:
          root: /tmp/workspace
@@ -129,25 +241,54 @@ jobs:
      - store_artifacts:
          path: /tmp/logs

  localnet:
    working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
    machine:
      image: circleci/classic:latest
    environment:
      GOBIN: /home/circleci/.go_workspace/bin
      GOPATH: /home/circleci/.go_workspace/
      GOOS: linux
      GOARCH: amd64
    parallelism: 1
  test_persistence:
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          key: v3-pkg-cache
      # - restore_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      - checkout
      - run:
          name: run localnet and exit on failure
          name: tools
          command: |
            set -x
            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux
            make localnet-start &
            ./scripts/localnet-blocks-test.sh 40 5 10 localhost
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

      - run:
          name: Run tests
          command: bash test/persist/test_failure_indices.sh

  localnet:
    working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
    machine:
      image: circleci/classic:latest
    environment:
      GOBIN: /home/circleci/.go_workspace/bin
      GOPATH: /home/circleci/.go_workspace/
      GOOS: linux
      GOARCH: amd64
    parallelism: 1
    steps:
      - checkout
      - run:
          name: run localnet and exit on failure
          command: |
            set -x
            make get_tools
            make get_vendor_deps
            make build-linux
            make localnet-start &
            ./scripts/localnet-blocks-test.sh 40 5 10 localhost
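Note: `localnet-blocks-test.sh 40 5 10 localhost` waits for the 4-node localnet to produce blocks. Reading the arguments as attempts/interval/blocks/host is an assumption about the script, so here is only a hedged sketch of an equivalent check against the standard Tendermint RPC port:

```bash
#!/usr/bin/env bash
# Hedged sketch: wait for the localnet to reach 10 blocks, checking every 5s, up to 40 times.
for _ in $(seq 1 40); do
  height=$(curl -s localhost:26657/status | jq -r '.result.sync_info.latest_block_height')
  if [ "${height:-0}" -ge 10 ] 2>/dev/null; then
    echo "localnet is producing blocks (height=$height)"
    exit 0
  fi
  sleep 5
done
echo "localnet failed to produce blocks" >&2
exit 1
```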

  test_p2p:
    environment:
@@ -164,22 +305,31 @@ jobs:
          path: /home/circleci/project/test/p2p/logs

  upload_coverage:
    executor: golang
    <<: *defaults
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      # - restore_cache:
      #     key: v3-tree-{{ .Environment.CIRCLE_SHA1 }}
      - checkout
      - restore_cache:
          name: "Restore go module cache"
          keys:
            - go-mod-v2-{{ checksum "go.sum" }}
      - run:
          name: tools
          command: |
            export PATH="$GOBIN:$PATH"
            make get_tools
      - run:
          name: dependencies
          command: |
            export PATH="$GOBIN:$PATH"
            make get_vendor_deps
      - run: mkdir -p $GOPATH/src/github.com/tendermint
      - run: ln -sf /home/circleci/project $GOPATH/src/github.com/tendermint/tendermint

      - run:
          name: gather
          command: |
            set -ex

            echo "mode: atomic" > coverage.txt
            for prof in $(ls /tmp/workspace/profiles/); do
              tail -n +2 /tmp/workspace/profiles/"$prof" >> coverage.txt
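Note: each parallel `test_cover` container persisted one Go cover profile per package into the workspace, and the `gather` step merges them. `tail -n +2` drops the `mode: atomic` header line that every profile repeats, so the merged file keeps a single header. Standalone, the merge is just:

```bash
#!/usr/bin/env bash
set -e
# Merge per-package Go cover profiles into one file with a single mode header.
echo "mode: atomic" > coverage.txt
for prof in /tmp/workspace/profiles/*; do
  tail -n +2 "$prof" >> coverage.txt
done
go tool cover -func=coverage.txt | tail -n 1   # sanity check: print total coverage
```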
@@ -188,221 +338,14 @@ jobs:
          name: upload
          command: bash .circleci/codecov.sh -f coverage.txt

  deploy_docs:
    executor: docs
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - run:
          name: Trigger website build
          command: |
            curl --silent \
                 --show-error \
                 -X POST \
                 --header "Content-Type: application/json" \
                 -d "{\"branch\": \"$CIRCLE_BRANCH\"}" \
                 "https://circleci.com/api/v1.1/project/github/$CIRCLE_PROJECT_USERNAME/$WEBSITE_REPO_NAME/build?circle-token=$TENDERBOT_API_TOKEN" > response.json

            RESULT=`jq -r '.status' response.json`
            MESSAGE=`jq -r '.message' response.json`

            if [[ ${RESULT} == "null" ]] || [[ ${RESULT} -ne "200" ]]; then
              echo "CircleCI API call failed: $MESSAGE"
              exit 1
            else
              echo "Website build started"
            fi
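Note: the jq calls above read the `status`/`message` fields of the CircleCI v1.1 build-trigger response; the exact payload shape is an assumption here, but an illustrative failure case shows what the error branch guards against:

```bash
# Illustrative only: a payload the error branch would catch (values are made up).
cat > response.json <<'EOF'
{ "status": 404, "message": "Project not found" }
EOF
RESULT=$(jq -r '.status' response.json)    # -> 404, so the job exits 1
MESSAGE=$(jq -r '.message' response.json)  # -> Project not found
```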

  prepare_build:
    executor: golang
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - run:
          name: Get next release number
          command: |
            export LAST_TAG="`git describe --tags --abbrev=0 --match "${CIRCLE_BRANCH}.*"`"
            echo "Last tag: ${LAST_TAG}"
            if [ -z "${LAST_TAG}" ]; then
              export LAST_TAG="${CIRCLE_BRANCH}"
              echo "Last tag not found. Possibly fresh branch or feature branch. Setting ${LAST_TAG} as tag."
            fi
            export NEXT_TAG="`python -u scripts/release_management/bump-semver.py --version "${LAST_TAG}"`"
            echo "Next tag: ${NEXT_TAG}"
            echo "export CIRCLE_TAG=\"${NEXT_TAG}\"" > release-version.source
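Note: `bump-semver.py` lives in `scripts/release_management/` and its exact behavior is not shown here, so the following is only a sketch of the patch-style bump the surrounding shell implies (a bare branch tag `vX.Y` becomes `vX.Y.0`, an existing `vX.Y.Z` gets its patch incremented):

```bash
#!/usr/bin/env bash
# Hedged sketch of the version bump, not the repo's script.
bump() {
  case "$1" in
    v[0-9]*.[0-9]*.[0-9]*) echo "$1" | awk -F. '{ printf "%s.%s.%d\n", $1, $2, $3 + 1 }' ;;
    v[0-9]*.[0-9]*)        echo "$1.0" ;;
  esac
}
bump v0.32     # -> v0.32.0 (fresh release branch)
bump v0.32.4   # -> v0.32.5 (next patch)
```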
      - run:
          name: Build dependencies
          command: make tools
      - persist_to_workspace:
          root: .
          paths:
            - "release-version.source"
      - save_cache:
          key: v2-release-deps-{{ checksum "go.sum" }}
          paths:
            - "/go/pkg/mod"

  build_artifacts:
    executor: golang
    parallelism: 4
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - restore_cache:
          name: "Restore release dependencies cache"
          keys:
            - v2-release-deps-{{ checksum "go.sum" }}
      - attach_workspace:
          at: /tmp/workspace
      - run:
          name: Build artifact
          command: |
            # Setting CIRCLE_TAG because we do not tag the release ourselves.
            source /tmp/workspace/release-version.source
            if test ${CIRCLE_NODE_INDEX:-0} == 0 ;then export GOOS=linux GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
            if test ${CIRCLE_NODE_INDEX:-0} == 1 ;then export GOOS=darwin GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
            if test ${CIRCLE_NODE_INDEX:-0} == 2 ;then export GOOS=windows GOARCH=amd64 && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
            if test ${CIRCLE_NODE_INDEX:-0} == 3 ;then export GOOS=linux GOARCH=arm && export OUTPUT=build/tendermint_${GOOS}_${GOARCH} && make build && python -u scripts/release_management/zip-file.py ;fi
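Note: with `parallelism: 4`, CircleCI runs this job in four identical containers that differ only in `CIRCLE_NODE_INDEX`, so each `if test` line claims one platform. The same fan-out written once (a sketch, not the repo's config):

```bash
#!/usr/bin/env bash
set -e
# One GOOS/GOARCH pair per parallel container, selected by CIRCLE_NODE_INDEX.
platforms=("linux amd64" "darwin amd64" "windows amd64" "linux arm")
read -r GOOS GOARCH <<< "${platforms[${CIRCLE_NODE_INDEX:-0}]}"
export GOOS GOARCH OUTPUT="build/tendermint_${GOOS}_${GOARCH}"
make build
python -u scripts/release_management/zip-file.py
```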
      - persist_to_workspace:
          root: build
          paths:
            - "*.zip"
            - "tendermint_linux_amd64"

  release_artifacts:
    executor: golang
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - attach_workspace:
          at: /tmp/workspace
      - run:
          name: "Deploy to GitHub"
          command: |
            # Setting CIRCLE_TAG because we do not tag the release ourselves.
            source /tmp/workspace/release-version.source
            echo "---"
            ls -la /tmp/workspace/*.zip
            echo "---"
            python -u scripts/release_management/sha-files.py
            echo "---"
            cat /tmp/workspace/SHA256SUMS
            echo "---"
            export RELEASE_ID="`python -u scripts/release_management/github-draft.py`"
            echo "Release ID: ${RELEASE_ID}"
            # TODO: parallelize uploads
            export GOOS=linux GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
            export GOOS=darwin GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
            export GOOS=windows GOARCH=amd64 && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
            export GOOS=linux GOARCH=arm && python -u scripts/release_management/github-upload.py --id "${RELEASE_ID}"
            python -u scripts/release_management/github-upload.py --file "/tmp/workspace/SHA256SUMS" --id "${RELEASE_ID}"
            python -u scripts/release_management/github-publish.py --id "${RELEASE_ID}"

  release_docker:
    machine:
      image: ubuntu-1604:201903-01
    steps:
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - attach_workspace:
          at: /tmp/workspace
      - run:
          name: "Deploy to Docker Hub"
          command: |
            # Setting CIRCLE_TAG because we do not tag the release ourselves.
            source /tmp/workspace/release-version.source
            cp /tmp/workspace/tendermint_linux_amd64 DOCKER/tendermint
            docker build --label="tendermint" --tag="tendermint/tendermint:${CIRCLE_TAG}" --tag="tendermint/tendermint:latest" "DOCKER"
            docker login -u "${DOCKERHUB_USER}" --password-stdin <<< "${DOCKERHUB_PASS}"
            docker push "tendermint/tendermint"
            docker logout

  reproducible_builds:
    executor: golang
    steps:
      - attach_workspace:
          at: /tmp/workspace
      - restore_cache:
          name: "Restore source code cache"
          keys:
            - go-src-v1-{{ .Revision }}
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - run:
          name: Build tendermint
          no_output_timeout: 20m
          command: |
            sudo apt-get install -y ruby
            bash -x ./scripts/gitian-build.sh all
            for os in darwin linux windows; do
              cp gitian-build-${os}/result/tendermint-${os}-res.yml .
              cp gitian-build-${os}/build/out/tendermint-*.tar.gz .
              rm -rf gitian-build-${os}/
            done
      - store_artifacts:
          path: /go/src/github.com/tendermint/tendermint/tendermint-darwin-res.yml
      - store_artifacts:
          path: /go/src/github.com/tendermint/tendermint/tendermint-linux-res.yml
      - store_artifacts:
          path: /go/src/github.com/tendermint/tendermint/tendermint-windows-res.yml
      - store_artifacts:
          path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz

  # Test the RPC implementation against the swagger-documented specs
  contract_tests:
    working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint
    machine:
      image: circleci/classic:latest
    environment:
      GOBIN: /home/circleci/.go_workspace/bin
      GOPATH: /home/circleci/.go_workspace/
      GOOS: linux
      GOARCH: amd64
    parallelism: 1
    steps:
      - checkout
      - run:
          name: Test RPC endpoints against swagger documentation
          command: |
            set -x
            export PATH=~/.local/bin:$PATH

            # install node and dredd
            ./scripts/get_nodejs.sh

            # build the binaries with a proper version of Go
            docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks

            # This docker image works with go 1.7; here we can install the hook handler that contract-tests is going to use
            go get github.com/snikch/goodman/cmd/goodman
            make contract-tests

workflows:
  version: 2
  test-suite:
    jobs:
      - deploy_docs:
          filters:
            branches:
              only:
                - master
      - setup_dependencies
      - lint:
          requires:
            - setup_dependencies
      - test_abci_apps:
          requires:
            - setup_dependencies
@@ -425,35 +368,3 @@ workflows:
      - upload_coverage:
          requires:
            - test_cover
      - reproducible_builds:
          filters:
            branches:
              only:
                - master
                - /v[0-9]+\.[0-9]+/
      - contract_tests:
          requires:
            - setup_dependencies

  release:
    jobs:
      - prepare_build
      - build_artifacts:
          requires:
            - prepare_build
      - release_artifacts:
          requires:
            - prepare_build
            - build_artifacts
          filters:
            branches:
              only:
                - /v[0-9]+\.[0-9]+/
      - release_docker:
          requires:
            - prepare_build
            - build_artifacts
          filters:
            branches:
              only:
                - /v[0-9]+\.[0-9]+/

4
.github/CODEOWNERS
vendored
@@ -4,6 +4,4 @@
* @ebuchman @melekes @xla

# Precious documentation
/docs/README.md @zramsay
/docs/DOCS_README.md @zramsay
/docs/.vuepress/ @zramsay
/docs/ @zramsay

9
.github/PULL_REQUEST_TEMPLATE.md
vendored
@@ -1,12 +1,5 @@
<!--
<!-- Thanks for filing a PR! Before hitting the button, please check the following items.-->

Thanks for filing a PR! Before hitting the button, please check the following items.
Please note that every non-trivial PR must reference an issue that explains the
changes in the PR.

-->

* [ ] Referenced an issue explaining the need for the change
* [ ] Updated all relevant documentation in docs
* [ ] Updated all code comments where relevant
* [ ] Wrote tests

3
.gitignore
vendored
@@ -35,7 +35,6 @@ shunit2
addrbook.json

*/vendor
.vendor-new/
*/.glide
.terraform
terraform.tfstate
@@ -43,5 +42,3 @@ terraform.tfstate.backup
terraform.tfstate.d

.vscode

profile\.out

.golangci.yml
@@ -1,48 +0,0 @@
run:
  deadline: 1m

linters:
  enable-all: true
  disable:
    - gocyclo
    - golint
    - maligned
    - errcheck
    - interfacer
    - unparam
    - lll
    - gochecknoglobals
    - gochecknoinits
    - stylecheck
# linters-settings:
#   govet:
#     check-shadowing: true
#   golint:
#     min-confidence: 0
#   gocyclo:
#     min-complexity: 10
#   maligned:
#     suggest-new: true
#   dupl:
#     threshold: 100
#   depguard:
#     list-type: blacklist
#     packages:
#       # logging is allowed only by logutils.Log, logrus
#       # is allowed to use only in logutils package
#       - github.com/sirupsen/logrus
#   misspell:
#     locale: US
#   lll:
#     line-length: 140
#   goimports:
#     local-prefixes: github.com/golangci/golangci-lint
#   gocritic:
#     enabled-tags:
#       - performance
#       - style
#       - experimental
#     disabled-checks:
#       - wrapperFunc
#       - commentFormatting # https://github.com/go-critic/go-critic/issues/755
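Note: this `.golangci.yml` (present on only one side of the diff) enables every linter and then disables the listed ones. golangci-lint reads the file from the repo root automatically, so exercising the same configuration locally is just:

```bash
# Assumes golangci-lint is installed; it picks up .golangci.yml from the working directory.
golangci-lint run ./...
```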

1274
CHANGELOG.md
File diff suppressed because it is too large

CHANGELOG_PENDING.md
@@ -1,22 +1,107 @@
## v0.32.6
# Pending

\*\*
## v0.26.0

*October 19, 2018*

Special thanks to external contributors on this release:
@bradyjoestar, @connorwstein, @goolAdapter, @HaoyangLiu,
@james-ray, @overbool, @phymbert, @Slamper, @Uzair1995

Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
This release is primarily about adding Version fields to various data structures,
optimizing consensus messages for signing and verification in
restricted environments (like HSMs and the Ethereum Virtual Machine), and
aligning the consensus code with the [specification](https://arxiv.org/abs/1807.04938).
It also includes our first take at a generalized merkle proof system.

### BREAKING CHANGES:
See the [UPGRADING.md](UPGRADING.md#v0.26.0) for details on upgrading to the new
version.

- CLI/RPC/Config
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).

- Apps
BREAKING CHANGES:

- Go API
* CLI/RPC/Config
  * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints
  * [config] [\#2505](https://github.com/tendermint/tendermint/issues/2505) Remove Mempool.RecheckEmpty (it was effectively useless anyways)
  * [config] [\#2490](https://github.com/tendermint/tendermint/issues/2490) `mempool.wal` is disabled by default
  * [privval] [\#2459](https://github.com/tendermint/tendermint/issues/2459) Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain an error message (returned by the remote signer)
  * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version field to State, breaking the format of State as
    encoded on disk.
  * [rpc] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `/abci_query` takes `prove` argument instead of `trusted` and switches the default
    behaviour to `prove=false`
  * [rpc] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Remove all `node_info.other.*_version` fields in `/status` and
    `/net_info`

### FEATURES:
* Apps
  * [abci] [\#2298](https://github.com/tendermint/tendermint/issues/2298) ResponseQuery.Proof is now a structured merkle.Proof, not just
    arbitrary bytes
  * [abci] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version to Header and shift all fields by one
  * [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Bump the field numbers for some `ResponseInfo` fields to make room for
    `AppVersion`

### IMPROVEMENTS:
* Go API
  * [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) timeouts as time.Duration, not ints
  * [crypto/merkle & lite] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Various changes to accommodate General Merkle trees
  * [crypto/merkle] [\#2595](https://github.com/tendermint/tendermint/issues/2595) Remove all Hasher objects in favor of byte slices
  * [crypto/merkle] [\#2635](https://github.com/tendermint/tendermint/issues/2635) merkle.SimpleHashFromTwoHashes is no longer exported
  * [node] [\#2479](https://github.com/tendermint/tendermint/issues/2479) Remove node.RunForever
  * [rpc/client] [\#2298](https://github.com/tendermint/tendermint/issues/2298) `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove`
  * [types] [\#2298](https://github.com/tendermint/tendermint/issues/2298) Remove `Index` and `Total` fields from `TxProof`.
  * [types] [\#2598](https://github.com/tendermint/tendermint/issues/2598) `VoteTypeXxx` are now of type `SignedMsgType byte` and named `XxxType`, eg. `PrevoteType`,
    `PrecommitType`.

* Blockchain Protocol
  * [types] Update SignBytes for `Vote`/`Proposal`/`Heartbeat`:
    * [\#2459](https://github.com/tendermint/tendermint/issues/2459) Use amino encoding instead of JSON in `SignBytes`.
    * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Reorder fields and use fixed sized encoding.
    * [\#2598](https://github.com/tendermint/tendermint/issues/2598) Change `Type` field from `string` to `byte` and use new
      `SignedMsgType` to enumerate.
  * [types] [\#2512](https://github.com/tendermint/tendermint/issues/2512) Remove the pubkey field from the validator hash
  * [types] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Add Version struct to Header
  * [types] [\#2609](https://github.com/tendermint/tendermint/issues/2609) ConsensusParams.Hash() is the hash of the amino encoded
    struct instead of the Merkle tree of the fields
  * [state] [\#2587](https://github.com/tendermint/tendermint/issues/2587) Require block.Time of the first block to be genesis time
  * [state] [\#2644](https://github.com/tendermint/tendermint/issues/2644) Require block.Version to match state.Version
  * [types] [\#2670](https://github.com/tendermint/tendermint/issues/2670) Header.Hash() builds Merkle tree out of fields in the same
    order they appear in the header, instead of sorting by field name

* P2P Protocol
  * [p2p] [\#2654](https://github.com/tendermint/tendermint/issues/2654) Add `ProtocolVersion` struct with protocol versions to top of
    DefaultNodeInfo and require `ProtocolVersion.Block` to match during peer handshake

FEATURES:
- [abci] [\#2557](https://github.com/tendermint/tendermint/issues/2557) Add `Codespace` field to `Response{CheckTx, DeliverTx, Query}`
- [abci] [\#2662](https://github.com/tendermint/tendermint/issues/2662) Add `BlockVersion` and `P2PVersion` to `RequestInfo`
- [crypto/merkle] [\#2298](https://github.com/tendermint/tendermint/issues/2298) General Merkle Proof scheme for chaining various types of Merkle trees together

IMPROVEMENTS:
- Additional Metrics
  - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169)
  - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169)
- [config] [\#2232](https://github.com/tendermint/tendermint/issues/2232) Added ValidateBasic method, which performs basic checks
- [crypto/ed25519] [\#2558](https://github.com/tendermint/tendermint/issues/2558) Switch to use latest `golang.org/x/crypto` through our fork at
  github.com/tendermint/crypto
- [tools] [\#2238](https://github.com/tendermint/tendermint/issues/2238) Binary dependencies are now locked to a specific git commit

BUG FIXES:
- [autofile] [\#2428](https://github.com/tendermint/tendermint/issues/2428) Group.RotateFile needs to call Flush() before rename (@goolAdapter)
- [common] [\#2533](https://github.com/tendermint/tendermint/issues/2533) Fixed a bug in the `BitArray.Or` method
- [common] [\#2506](https://github.com/tendermint/tendermint/issues/2506) Fixed a bug in the `BitArray.Sub` method (@james-ray)
- [common] [\#2534](https://github.com/tendermint/tendermint/issues/2534) Fix `BitArray.PickRandom` to choose uniformly from true bits
- [consensus] [\#1690](https://github.com/tendermint/tendermint/issues/1690) Wait for
  timeoutPrecommit before starting next round
- [consensus] [\#1745](https://github.com/tendermint/tendermint/issues/1745) Wait for
  Proposal or timeoutProposal before entering prevote
- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Only propose ValidBlock, not LockedBlock
- [consensus] [\#2642](https://github.com/tendermint/tendermint/issues/2642) Initialized ValidRound and LockedRound to -1
- [consensus] [\#1637](https://github.com/tendermint/tendermint/issues/1637) Limit the amount of evidence that can be included in a
  block
- [evidence] [\#2515](https://github.com/tendermint/tendermint/issues/2515) Fix db iter leak (@goolAdapter)
- [libs/event] [\#2518](https://github.com/tendermint/tendermint/issues/2518) Fix event concurrency flaw (@goolAdapter)
- [node] [\#2434](https://github.com/tendermint/tendermint/issues/2434) Make node respond to signal interrupts while sleeping for genesis time
- [state] [\#2616](https://github.com/tendermint/tendermint/issues/2616) Pass nil to NewValidatorSet() when genesis file's Validators field is nil
- [p2p] [\#2555](https://github.com/tendermint/tendermint/issues/2555) Fix p2p switch FlushThrottle value (@goolAdapter)
- [p2p] [\#2668](https://github.com/tendermint/tendermint/issues/2668) Reconnect to originally dialed address (not self-reported
  address) for persistent peers

### BUG FIXES:

CODE_OF_CONDUCT.md
@@ -6,7 +6,7 @@ This code of conduct applies to all projects run by the Tendermint/COSMOS team a


# Conduct
## Contact: conduct@tendermint.com
## Contact: adrian@tendermint.com

* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.


190
CONTRIBUTING.md
@@ -2,35 +2,24 @@

Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards.

Please follow standard github best practices: fork the repo, branch from the tip of `master`, make some commits, and submit a pull request to `master`.
See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!
Please follow standard github best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with!

Before making a pull request, please open an issue describing the
change you would like to make. If an issue for your change already exists,
please comment on it that you will submit a pull request. Be sure to reference the issue in the opening
comment of your pull request. If your change is substantial, you will be asked
to write a more detailed design document in the form of an
Architectural Decision Record (ie. see [here](./docs/architecture/)) before submitting code
changes.

Please open a [Draft PR](https://github.blog/2019-02-14-introducing-draft-pull-requests/) even if your contribution is incomplete; this indicates to the community that you're working on something and allows them to provide comments early in the development process. When the code is complete, it can be marked as ready-for-review.

Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file. Additionally, please ensure that your code is lint compliant by running `make lint`.
Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file.

## Forking

Please note that Go requires code to live under absolute paths, which complicates forking.
While my fork lives at `https://github.com/ebuchman/tendermint`,
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`.
Instead, we use `git remote` to add the fork as a new remote for the original repo,
`$GOPATH/src/github.com/tendermint/tendermint`, and do all the work there.
`$GOPATH/src/github.com/tendermint/tendermint `, and do all the work there.

For instance, to create a fork and work on a branch of it, I would:

- Create the fork on github, using the fork button.
- Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
- `git remote rename origin upstream`
- `git remote add origin git@github.com:ebuchman/basecoin.git`
* Create the fork on github, using the fork button.
* Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`)
* `git remote rename origin upstream`
* `git remote add origin git@github.com:ebuchman/basecoin.git`

Now `origin` refers to my fork and `upstream` refers to the tendermint version.
So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there.
@@ -38,12 +27,14 @@ Of course, replace `ebuchman` with your git handle.

To pull in updates from the origin repo, run

- `git fetch upstream`
- `git rebase upstream/master` (or whatever branch you want)
* `git fetch upstream`
* `git rebase upstream/master` (or whatever branch you want)

Please don't make Pull Requests to `master`.

## Dependencies

We use [go modules](https://github.com/golang/go/wiki/Modules) to manage dependencies.
We use [dep](https://github.com/golang/dep) to manage dependencies.

That said, the master branch of every Tendermint repository should just build
with `go get`, which means they should be kept up-to-date with their
@@ -51,18 +42,14 @@ dependencies so we can get away with telling people they can just `go get` our
software.

Since some dependencies are not under our control, a third party may break our
build, in which case we can fall back on `go mod tidy`. Even for dependencies under our control, go helps us to
build, in which case we can fall back on `dep ensure` (or `make
get_vendor_deps`). Even for dependencies under our control, dep helps us to
keep multiple repos in sync as they evolve. Anything with an executable, such
as apps, tools, and the core, should use dep.

Run `go list -u -m all` to get a list of dependencies that may not be
Run `dep status` to get a list of vendor dependencies that may not be
up-to-date.

When updating dependencies, please only update the particular dependencies you
need. Instead of running `go get -u=patch`, which will update anything,
specify exactly the dependency you want to update, eg.
`GO111MODULE=on go get -u github.com/tendermint/go-amino@master`.

## Vagrant

If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started
@@ -77,103 +64,6 @@ vagrant ssh
make test
```

## Changelog

Every fix, improvement, feature, or breaking change should be made in a
pull-request that includes an update to the `CHANGELOG_PENDING.md` file.

Changelog entries should be formatted as follows:

```
- [module] \#xxx Some description about the change (@contributor)
```

Here, `module` is the part of the code that changed (typically a
top-level Go package), `xxx` is the pull-request number, and `contributor`
is the author/s of the change.

It's also acceptable for `xxx` to refer to the relevant issue number, but pull-request
numbers are preferred.
Note this means pull-requests should be opened first so the changelog can then
be updated with the pull-request's number.
There is no need to include the full link, as this will be added
automatically during release. But please include the backslash and pound, eg. `\#2313`.

Changelog entries should be ordered alphabetically according to the
`module`, and numerically according to the pull-request number.

Changes with multiple classifications should be doubly included (eg. a bug fix
that is also a breaking change should be recorded under both), as in the example below.
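For instance, a made-up entry (issue number and field name are hypothetical) that is both a bug fix and a breaking change would appear under both headings:

```
### BREAKING CHANGES:
- [rpc] \#0000 Remove the deprecated `foo` field from `/status` (@contributor)

### BUG FIXES:
- [rpc] \#0000 Remove the deprecated `foo` field from `/status` (@contributor)
```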

Breaking changes are further subdivided according to the APIs/users they impact.
Any change that affects multiple APIs/users should be recorded multiply - for
instance, a change to the `Blockchain Protocol` that removes a field from the
header should also be recorded under `CLI/RPC/Config` since the field will be
removed from the header in rpc responses as well.

## Branching Model and Release

The main development branch is master.

Every release is maintained in a release branch named `vX.Y.Z`.

Note all pull requests should be squash merged except for merging to a release branch (named `vX.Y`). This keeps the commit history clean and makes it
easy to reference the pull request where a change was introduced.

### Development Procedure

- the latest state of development is on `master`
- `master` must never fail `make test`
- never --force onto `master` (except when reverting a broken commit, which should seldom happen)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`)
- make changes and update the `CHANGELOG_PENDING.md` to record your change
- before submitting a pull request, run `git rebase` on top of the latest `master`

### Pull Merge Procedure

- ensure pull branch is based on a recent `master`
- run `make test` to ensure that all tests pass
- squash merge pull request
- the `unstable` branch may be used to aggregate pull merges before fixing tests

### Release Procedure

#### Major Release

1. start on `master`
2. run integration tests (see `test_integrations` in Makefile)
3. prepare release in a pull request against `master` (to be squash merged):
   - copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
   - run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
     all issues
   - run `bash ./scripts/authors.sh` to get a list of authors since the latest
     release, and add the github aliases of external contributors to the top of
     the changelog. To look up an alias from an email, try `bash ./scripts/authors.sh <email>`
   - reset the `CHANGELOG_PENDING.md`
   - bump versions
4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`)
5. merge back to master (don't squash merge!)

#### Minor Release

If there were no breaking changes and you need to create a release nonetheless,
the procedure is almost exactly like with a new release above.

The only difference is that in the end you create a pull request against the existing `X.X` branch.
The branch name should match the release number you want to create.
Merging this PR will trigger the next release.
For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag,
the patch version will be incremented and the created release will be v0.34.1.

#### Backport Release

1. start from the existing release branch you want to backport changes to (e.g. v0.30),
   then branch to a release/vX.X.X branch locally (e.g. release/v0.30.7)
2. cherry pick the commit(s) that contain the changes you want to backport (usually these commits are from squash-merged PRs which were already reviewed)
3. steps 2 and 3 from [Major Release](#major-release)
4. push changes to release/vX.X.X branch
5. open a PR against the existing vX.X branch

## Testing

All repos should be hooked up to [CircleCI](https://circleci.com/).
@@ -183,15 +73,45 @@ tested by circle using `go test -v -race ./...`. If not, they will need a
`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and
includes its continuous integration status using a badge in the `README.md`.

### RPC Testing
## Branching Model and Release

If you contribute to the RPC endpoints it's important to document your changes in the [Swagger file](./docs/spec/rpc/swagger.yaml)
To test your changes you should install `nodejs` and run:
User-facing repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/.
That is, these repos should be well versioned, and any merge to master requires a version bump and tagged release.

```bash
npm i -g dredd
make build-linux build-contract-tests-hooks
make contract-tests
```
Libraries need not follow the model strictly, but would be wise to,
especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint core.

This command will spin up a network and check every endpoint against what has been documented
### Development Procedure:
- the latest state of development is on `develop`
- `develop` must never fail `make test`
- no --force onto `develop` (except when reverting a broken commit, which should seldom happen)
- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`)
- before submitting a pull request, begin `git rebase` on top of `develop`

### Pull Merge Procedure:
- ensure pull branch is rebased on develop
- run `make test` to ensure that all tests pass
- merge pull request
- the `unstable` branch may be used to aggregate pull merges before testing once
- push master may request that pull requests be rebased on top of `unstable`

### Release Procedure:
- start on `develop`
- run integration tests (see `test_integrations` in Makefile)
- prepare changelog/release issue
- bump versions
- push to release-vX.X.X to run the extended integration tests on the CI
- merge to master
- merge master back to develop

### Hotfix Procedure:
- start on `master`
- checkout a new branch named hotfix-vX.X.X
- make the required changes
  - these changes should be small and an absolute necessity
  - add a note to CHANGELOG.md
- bump versions
- push to hotfix-vX.X.X to run the extended integration tests on the CI
- merge hotfix-vX.X.X to master
- merge hotfix-vX.X.X to develop
- delete the hotfix-vX.X.X branch

@@ -1,5 +1,5 @@
FROM alpine:3.9
LABEL maintainer="hello@tendermint.com"
FROM alpine:3.7
MAINTAINER Greg Szabo <greg@tendermint.com>

# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
# (unless you change `genesis_file` in config.toml). You can put your config.toml and

@@ -15,7 +15,7 @@ RUN apt-get update && apt-get install -y \

COPY Gopkg.toml /go/src/github.com/tendermint/abci/
COPY Gopkg.lock /go/src/github.com/tendermint/abci/
RUN make tools
RUN make get_tools

# see https://github.com/golang/dep/issues/1312
RUN dep ensure -vendor-only

@@ -1,28 +0,0 @@
FROM amazonlinux:2

RUN yum -y update && \
    yum -y install wget

RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm && \
    rpm -ivh epel-release-latest-7.noarch.rpm

RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which

ENV GOVERSION=1.12.9

RUN cd /tmp && \
    wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \
    tar -C /usr/local -xf go${GOVERSION}.linux-amd64.tar.gz && \
    mkdir -p /go/src && \
    mkdir -p /go/bin

ENV PATH=$PATH:/usr/local/go/bin:/go/bin
ENV GOBIN=/go/bin
ENV GOPATH=/go/src

RUN mkdir -p /tendermint
WORKDIR /tendermint

CMD ["/usr/bin/make", "build_c"]

35
DOCKER/Dockerfile.develop
Normal file
@@ -0,0 +1,35 @@
FROM alpine:3.7

ENV DATA_ROOT /tendermint
ENV TMHOME $DATA_ROOT

RUN addgroup tmuser && \
    adduser -S -G tmuser tmuser

RUN mkdir -p $DATA_ROOT && \
    chown -R tmuser:tmuser $DATA_ROOT

RUN apk add --no-cache bash curl jq

ENV GOPATH /go
ENV PATH "$PATH:/go/bin"
RUN mkdir -p /go/src/github.com/tendermint/tendermint && \
    apk add --no-cache go build-base git && \
    cd /go/src/github.com/tendermint/tendermint && \
    git clone https://github.com/tendermint/tendermint . && \
    git checkout develop && \
    make get_tools && \
    make get_vendor_deps && \
    make install && \
    cd - && \
    rm -rf /go/src/github.com/tendermint/tendermint && \
    apk del go build-base git

VOLUME $DATA_ROOT

EXPOSE 26656
EXPOSE 26657

ENTRYPOINT ["tendermint"]

CMD ["node", "--moniker=`hostname`", "--proxy_app=kvstore"]
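A hedged usage sketch for the resulting image (the `tendermint/tendermint:develop` tag is the one the DOCKER/README below associates with this file):

```bash
# Initialize a home directory once, then run the default kvstore-backed node.
docker run --rm -v /tmp/tmdata:/tendermint tendermint/tendermint:develop init
docker run --rm -v /tmp/tmdata:/tendermint -p 26656-26657:26656-26657 tendermint/tendermint:develop
```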
@@ -1,4 +1,4 @@
FROM golang:1.12
FROM golang:1.10.1


# Grab deps (jq, hexdump, xxd, killall)

@@ -13,7 +13,4 @@ build_testing:
push_develop:
	docker push "tendermint/tendermint:develop"

build_amazonlinux_buildimage:
	docker build -t "tendermint/tendermint:build_c-amazonlinux" -f Dockerfile.build_c-amazonlinux .

.PHONY: build build_develop push push_develop

@@ -12,25 +12,28 @@
- `0.9.1`, `0.9`, [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile)
- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile)
- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile)
- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop)

`develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch.

## Quick reference

- **Where to get help:**
  [cosmos.network/ecosystem](https://cosmos.network/ecosystem)
* **Where to get help:**
  https://cosmos.network/community

- **Where to file issues:**
  [Tendermint Issues](https://github.com/tendermint/tendermint/issues)
* **Where to file issues:**
  https://github.com/tendermint/tendermint/issues

- **Supported Docker versions:**
* **Supported Docker versions:**
  [the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis)

## Tendermint

Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines.

For more background, see [the docs](https://tendermint.com/docs/introduction/#quick-start).
For more background, see the [introduction](https://tendermint.readthedocs.io/en/master/introduction.html).

To get started developing applications, see the [application developers guide](https://tendermint.com/docs/introduction/quick-start.html).
To get started developing applications, see the [application developers guide](https://tendermint.readthedocs.io/en/master/getting-started.html).

## How to use this image

@@ -45,7 +48,7 @@ docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app

## Local cluster

To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run:
To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/master/Makefile) and run:

```
make build-linux
@@ -57,7 +60,7 @@ Note that this will build and use a different image than the ones provided here.

## License

- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE).
- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/master/LICENSE).

## Contributing


@@ -3,7 +3,7 @@ set -e

# Get the tag from the version, or try to figure it out.
if [ -z "$TAG" ]; then
  TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
  TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go)
fi
if [ -z "$TAG" ]; then
  echo "Please specify a tag."
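Note: the awk pulls the quoted version string straight out of `version/version.go`; the constant is `TMCoreSemVer` on one side of the diff and `Version` on the other. The matched line looks roughly like this (excerpt quoted from memory of v0.32.x — verify against the file):

```go
// version/version.go (illustrative excerpt; with -F\" the version is awk's $2)
TMCoreSemVer = "0.32.5"
```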
|
||||
@@ -3,7 +3,7 @@ set -e
|
||||
|
||||
# Get the tag from the version, or try to figure it out.
|
||||
if [ -z "$TAG" ]; then
|
||||
TAG=$(awk -F\" '/TMCoreSemVer =/ { print $2; exit }' < ../version/version.go)
|
||||
TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go)
|
||||
fi
|
||||
if [ -z "$TAG" ]; then
|
||||
echo "Please specify a tag."
|
||||
|
||||
552
Gopkg.lock
generated
Normal file
552
Gopkg.lock
generated
Normal file
@@ -0,0 +1,552 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.


[[projects]]
  branch = "master"
  digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
  name = "github.com/beorn7/perks"
  packages = ["quantile"]
  pruneopts = "UT"
  revision = "3a771d992973f24aa725d07868b467d1ddfceafb"

[[projects]]
  branch = "master"
  digest = "1:c0decf632843204d2b8781de7b26e7038584e2dcccc7e2f401e88ae85b1df2b7"
  name = "github.com/btcsuite/btcd"
  packages = ["btcec"]
  pruneopts = "UT"
  revision = "67e573d211ace594f1366b4ce9d39726c4b19bd0"

[[projects]]
  digest = "1:1d8e1cb71c33a9470bbbae09bfec09db43c6bf358dfcae13cd8807c4e2a9a2bf"
  name = "github.com/btcsuite/btcutil"
  packages = [
    "base58",
    "bech32",
  ]
  pruneopts = "UT"
  revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

[[projects]]
  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
  name = "github.com/davecgh/go-spew"
  packages = ["spew"]
  pruneopts = "UT"
  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
  version = "v1.1.1"

[[projects]]
  digest = "1:c7644c73a3d23741fdba8a99b1464e021a224b7e205be497271a8003a15ca41b"
  name = "github.com/ebuchman/fail-test"
  packages = ["."]
  pruneopts = "UT"
  revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"

[[projects]]
  digest = "1:544229a3ca0fb2dd5ebc2896d3d2ff7ce096d9751635301e44e37e761349ee70"
  name = "github.com/fortytw2/leaktest"
  packages = ["."]
  pruneopts = "UT"
  revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e"
  version = "v1.2.0"

[[projects]]
  digest = "1:abeb38ade3f32a92943e5be54f55ed6d6e3b6602761d74b4aab4c9dd45c18abd"
  name = "github.com/fsnotify/fsnotify"
  packages = ["."]
  pruneopts = "UT"
  revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9"
  version = "v1.4.7"

[[projects]]
  digest = "1:fdf5169073fb0ad6dc12a70c249145e30f4058647bea25f0abd48b6d9f228a11"
  name = "github.com/go-kit/kit"
  packages = [
    "log",
    "log/level",
    "log/term",
    "metrics",
    "metrics/discard",
    "metrics/internal/lv",
    "metrics/prometheus",
  ]
  pruneopts = "UT"
  revision = "4dc7be5d2d12881735283bcab7352178e190fc71"
  version = "v0.6.0"

[[projects]]
  digest = "1:31a18dae27a29aa074515e43a443abfd2ba6deb6d69309d8d7ce789c45f34659"
  name = "github.com/go-logfmt/logfmt"
  packages = ["."]
  pruneopts = "UT"
  revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5"
  version = "v0.3.0"

[[projects]]
  digest = "1:586ea76dbd0374d6fb649a91d70d652b7fe0ccffb8910a77468e7702e7901f3d"
  name = "github.com/go-stack/stack"
  packages = ["."]
  pruneopts = "UT"
  revision = "2fee6af1a9795aafbe0253a0cfbdf668e1fb8a9a"
  version = "v1.8.0"

[[projects]]
  digest = "1:35621fe20f140f05a0c4ef662c26c0ab4ee50bca78aa30fe87d33120bd28165e"
  name = "github.com/gogo/protobuf"
  packages = [
    "gogoproto",
    "jsonpb",
    "proto",
    "protoc-gen-gogo/descriptor",
    "sortkeys",
    "types",
  ]
  pruneopts = "UT"
  revision = "636bf0302bc95575d69441b25a2603156ffdddf1"
  version = "v1.1.1"

[[projects]]
  digest = "1:17fe264ee908afc795734e8c4e63db2accabaf57326dbf21763a7d6b86096260"
  name = "github.com/golang/protobuf"
  packages = [
    "proto",
    "ptypes",
    "ptypes/any",
    "ptypes/duration",
    "ptypes/timestamp",
  ]
  pruneopts = "UT"
  revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265"
  version = "v1.1.0"

[[projects]]
  branch = "master"
  digest = "1:4a0c6bb4805508a6287675fac876be2ac1182539ca8a32468d8128882e9d5009"
  name = "github.com/golang/snappy"
  packages = ["."]
  pruneopts = "UT"
  revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a"

[[projects]]
  digest = "1:43dd08a10854b2056e615d1b1d22ac94559d822e1f8b6fcc92c1a1057e85188e"
  name = "github.com/gorilla/websocket"
  packages = ["."]
  pruneopts = "UT"
  revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b"
  version = "v1.2.0"

[[projects]]
  digest = "1:ea40c24cdbacd054a6ae9de03e62c5f252479b96c716375aace5c120d68647c8"
  name = "github.com/hashicorp/hcl"
  packages = [
    ".",
    "hcl/ast",
    "hcl/parser",
    "hcl/scanner",
    "hcl/strconv",
    "hcl/token",
    "json/parser",
    "json/scanner",
    "json/token",
  ]
  pruneopts = "UT"
  revision = "8cb6e5b959231cc1119e43259c4a608f9c51a241"
  version = "v1.0.0"

[[projects]]
  digest = "1:870d441fe217b8e689d7949fef6e43efbc787e50f200cb1e70dbca9204a1d6be"
  name = "github.com/inconshreveable/mousetrap"
  packages = ["."]
  pruneopts = "UT"
  revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
  version = "v1.0"

[[projects]]
  digest = "1:39b27d1381a30421f9813967a5866fba35dc1d4df43a6eefe3b7a5444cb07214"
  name = "github.com/jmhodges/levigo"
  packages = ["."]
  pruneopts = "UT"
  revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"

[[projects]]
  branch = "master"
  digest = "1:a64e323dc06b73892e5bb5d040ced475c4645d456038333883f58934abbf6f72"
  name = "github.com/kr/logfmt"
  packages = ["."]
  pruneopts = "UT"
  revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0"

[[projects]]
  digest = "1:c568d7727aa262c32bdf8a3f7db83614f7af0ed661474b24588de635c20024c7"
  name = "github.com/magiconair/properties"
  packages = ["."]
  pruneopts = "UT"
  revision = "c2353362d570a7bfa228149c62842019201cfb71"
  version = "v1.8.0"

[[projects]]
  digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
  name = "github.com/matttproud/golang_protobuf_extensions"
  packages = ["pbutil"]
  pruneopts = "UT"
  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
  version = "v1.0.1"

[[projects]]
  digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318"
  name = "github.com/mitchellh/mapstructure"
  packages = ["."]
  pruneopts = "UT"
  revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe"
  version = "v1.1.2"

[[projects]]
  digest = "1:95741de3af260a92cc5c7f3f3061e85273f5a81b5db20d4bd68da74bd521675e"
  name = "github.com/pelletier/go-toml"
  packages = ["."]
  pruneopts = "UT"
  revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194"
  version = "v1.2.0"

[[projects]]
  digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747"
  name = "github.com/pkg/errors"
  packages = ["."]
  pruneopts = "UT"
  revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
  version = "v0.8.0"

[[projects]]
  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
  name = "github.com/pmezard/go-difflib"
  packages = ["difflib"]
  pruneopts = "UT"
  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
  version = "v1.0.0"

[[projects]]
  digest = "1:c1a04665f9613e082e1209cf288bf64f4068dcd6c87a64bf1c4ff006ad422ba0"
  name = "github.com/prometheus/client_golang"
  packages = [
    "prometheus",
    "prometheus/promhttp",
  ]
  pruneopts = "UT"
  revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632"

[[projects]]
  branch = "master"
  digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4"
  name = "github.com/prometheus/client_model"
  packages = ["go"]
  pruneopts = "UT"
  revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f"

[[projects]]
  branch = "master"
  digest = "1:db712fde5d12d6cdbdf14b777f0c230f4ff5ab0be8e35b239fc319953ed577a4"
  name = "github.com/prometheus/common"
  packages = [
    "expfmt",
    "internal/bitbucket.org/ww/goautoneg",
    "model",
  ]
  pruneopts = "UT"
  revision = "7e9e6cabbd393fc208072eedef99188d0ce788b6"

[[projects]]
  branch = "master"
  digest = "1:ef74914912f99c79434d9c09658274678bc85080ebe3ab32bec3940ebce5e1fc"
  name = "github.com/prometheus/procfs"
  packages = [
    ".",
    "internal/util",
    "nfs",
    "xfs",
  ]
  pruneopts = "UT"
  revision = "185b4288413d2a0dd0806f78c90dde719829e5ae"

[[projects]]
  digest = "1:c4556a44e350b50a490544d9b06e9fba9c286c21d6c0e47f54f3a9214597298c"
  name = "github.com/rcrowley/go-metrics"
  packages = ["."]
  pruneopts = "UT"
  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"

[[projects]]
  digest = "1:6a4a11ba764a56d2758899ec6f3848d24698d48442ebce85ee7a3f63284526cd"
  name = "github.com/spf13/afero"
  packages = [
    ".",
    "mem",
  ]
  pruneopts = "UT"
  revision = "d40851caa0d747393da1ffb28f7f9d8b4eeffebd"
  version = "v1.1.2"

[[projects]]
  digest = "1:516e71bed754268937f57d4ecb190e01958452336fa73dbac880894164e91c1f"
  name = "github.com/spf13/cast"
  packages = ["."]
  pruneopts = "UT"
  revision = "8965335b8c7107321228e3e3702cab9832751bac"
  version = "v1.2.0"

[[projects]]
  digest = "1:7ffc0983035bc7e297da3688d9fe19d60a420e9c38bef23f845c53788ed6a05e"
  name = "github.com/spf13/cobra"
  packages = ["."]
  pruneopts = "UT"
  revision = "7b2c5ac9fc04fc5efafb60700713d4fa609b777b"
  version = "v0.0.1"

[[projects]]
  digest = "1:68ea4e23713989dc20b1bded5d9da2c5f9be14ff9885beef481848edd18c26cb"
  name = "github.com/spf13/jwalterweatherman"
  packages = ["."]
  pruneopts = "UT"
  revision = "4a4406e478ca629068e7768fc33f3f044173c0a6"
  version = "v1.0.0"

[[projects]]
  digest = "1:c1b1102241e7f645bc8e0c22ae352e8f0dc6484b6cb4d132fa9f24174e0119e2"
  name = "github.com/spf13/pflag"
  packages = ["."]
  pruneopts = "UT"
  revision = "298182f68c66c05229eb03ac171abe6e309ee79a"
  version = "v1.0.3"

[[projects]]
  digest = "1:f8e1a678a2571e265f4bf91a3e5e32aa6b1474a55cb0ea849750cc177b664d96"
  name = "github.com/spf13/viper"
  packages = ["."]
  pruneopts = "UT"
  revision = "25b30aa063fc18e48662b86996252eabdcf2f0c7"
  version = "v1.0.0"

[[projects]]
  digest = "1:7e8d267900c7fa7f35129a2a37596e38ed0f11ca746d6d9ba727980ee138f9f6"
  name = "github.com/stretchr/testify"
  packages = [
    "assert",
    "require",
  ]
  pruneopts = "UT"
  revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
  version = "v1.2.1"

[[projects]]
  branch = "master"
  digest = "1:59483b8e8183f10ab21a85ba1f4cbb4a2335d48891801f79ed7b9499f44d383c"
  name = "github.com/syndtr/goleveldb"
  packages = [
    "leveldb",
    "leveldb/cache",
    "leveldb/comparer",
    "leveldb/errors",
    "leveldb/filter",
    "leveldb/iterator",
    "leveldb/journal",
    "leveldb/memdb",
    "leveldb/opt",
    "leveldb/storage",
    "leveldb/table",
    "leveldb/util",
  ]
  pruneopts = "UT"
  revision = "6b91fda63f2e36186f1c9d0e48578defb69c5d43"

[[projects]]
  digest = "1:605b6546f3f43745695298ec2d342d3e952b6d91cdf9f349bea9315f677d759f"
  name = "github.com/tendermint/btcd"
  packages = ["btcec"]
  pruneopts = "UT"
  revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"

[[projects]]
  digest = "1:5f52e817b6c9d52ddba70dece0ea31134d82a52c05bce98fbc739ab2a832df28"
  name = "github.com/tendermint/go-amino"
  packages = ["."]
  pruneopts = "UT"
  revision = "cb07448b240918aa8d8df4505153549b86b77134"
  version = "v0.13.0"

[[projects]]
  digest = "1:72b71e3a29775e5752ed7a8012052a3dee165e27ec18cedddae5288058f09acf"
  name = "golang.org/x/crypto"
  packages = [
    "bcrypt",
    "blowfish",
    "chacha20poly1305",
    "curve25519",
    "ed25519",
    "ed25519/internal/edwards25519",
    "hkdf",
    "internal/chacha20",
    "internal/subtle",
    "nacl/box",
    "nacl/secretbox",
    "openpgp/armor",
    "openpgp/errors",
    "poly1305",
    "ripemd160",
    "salsa20/salsa",
  ]
  pruneopts = "UT"
  revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"
  source = "github.com/tendermint/crypto"

[[projects]]
  digest = "1:d36f55a999540d29b6ea3c2ea29d71c76b1d9853fdcd3e5c5cb4836f2ba118f1"
  name = "golang.org/x/net"
  packages = [
    "context",
    "http/httpguts",
    "http2",
    "http2/hpack",
    "idna",
    "internal/timeseries",
    "netutil",
    "trace",
  ]
  pruneopts = "UT"
  revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"

[[projects]]
  branch = "master"
  digest = "1:d1da39c9bac61327dbef1d8ef9f210425e99fd2924b6fb5f0bc587a193353637"
  name = "golang.org/x/sys"
  packages = [
    "cpu",
    "unix",
  ]
  pruneopts = "UT"
  revision = "8a28ead16f52c8aaeffbf79239b251dfdf6c4f96"

[[projects]]
  digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18"
  name = "golang.org/x/text"
  packages = [
    "collate",
    "collate/build",
    "internal/colltab",
    "internal/gen",
    "internal/tag",
    "internal/triegen",
    "internal/ucd",
    "language",
    "secure/bidirule",
    "transform",
    "unicode/bidi",
    "unicode/cldr",
    "unicode/norm",
    "unicode/rangetable",
  ]
  pruneopts = "UT"
  revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
  version = "v0.3.0"

[[projects]]
  branch = "master"
  digest = "1:56b0bca90b7e5d1facf5fbdacba23e4e0ce069d25381b8e2f70ef1e7ebfb9c1a"
  name = "google.golang.org/genproto"
  packages = ["googleapis/rpc/status"]
  pruneopts = "UT"
  revision = "94acd270e44e65579b9ee3cdab25034d33fed608"

[[projects]]
  digest = "1:2dab32a43451e320e49608ff4542fdfc653c95dcc35d0065ec9c6c3dd540ed74"
  name = "google.golang.org/grpc"
  packages = [
    ".",
    "balancer",
    "balancer/base",
    "balancer/roundrobin",
    "codes",
    "connectivity",
    "credentials",
    "encoding",
    "encoding/proto",
    "grpclog",
    "internal",
    "internal/backoff",
    "internal/channelz",
    "internal/grpcrand",
    "keepalive",
    "metadata",
    "naming",
    "peer",
    "resolver",
    "resolver/dns",
    "resolver/passthrough",
    "stats",
    "status",
    "tap",
    "transport",
  ]
  pruneopts = "UT"
  revision = "168a6198bcb0ef175f7dacec0b8691fc141dc9b8"
  version = "v1.13.0"

[[projects]]
  digest = "1:342378ac4dcb378a5448dd723f0784ae519383532f5e70ade24132c4c8693202"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  pruneopts = "UT"
  revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183"
  version = "v2.2.1"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  input-imports = [
    "github.com/btcsuite/btcutil/base58",
    "github.com/btcsuite/btcutil/bech32",
    "github.com/ebuchman/fail-test",
    "github.com/fortytw2/leaktest",
    "github.com/go-kit/kit/log",
    "github.com/go-kit/kit/log/level",
    "github.com/go-kit/kit/log/term",
    "github.com/go-kit/kit/metrics",
    "github.com/go-kit/kit/metrics/discard",
    "github.com/go-kit/kit/metrics/prometheus",
    "github.com/go-logfmt/logfmt",
    "github.com/gogo/protobuf/gogoproto",
    "github.com/gogo/protobuf/jsonpb",
    "github.com/gogo/protobuf/proto",
    "github.com/gogo/protobuf/types",
    "github.com/golang/protobuf/proto",
    "github.com/golang/protobuf/ptypes/timestamp",
    "github.com/gorilla/websocket",
    "github.com/jmhodges/levigo",
    "github.com/pkg/errors",
    "github.com/prometheus/client_golang/prometheus",
    "github.com/prometheus/client_golang/prometheus/promhttp",
    "github.com/rcrowley/go-metrics",
    "github.com/spf13/cobra",
    "github.com/spf13/viper",
    "github.com/stretchr/testify/assert",
    "github.com/stretchr/testify/require",
    "github.com/syndtr/goleveldb/leveldb",
    "github.com/syndtr/goleveldb/leveldb/errors",
    "github.com/syndtr/goleveldb/leveldb/iterator",
    "github.com/syndtr/goleveldb/leveldb/opt",
    "github.com/tendermint/btcd/btcec",
    "github.com/tendermint/go-amino",
    "golang.org/x/crypto/bcrypt",
    "golang.org/x/crypto/chacha20poly1305",
    "golang.org/x/crypto/curve25519",
    "golang.org/x/crypto/ed25519",
    "golang.org/x/crypto/hkdf",
    "golang.org/x/crypto/nacl/box",
    "golang.org/x/crypto/nacl/secretbox",
    "golang.org/x/crypto/openpgp/armor",
    "golang.org/x/crypto/ripemd160",
    "golang.org/x/net/context",
    "golang.org/x/net/netutil",
    "google.golang.org/grpc",
    "google.golang.org/grpc/credentials",
  ]
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml (new file, 112 lines)
@@ -0,0 +1,112 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
#   name = "github.com/user/project"
#   version = "1.0.0"
#
# [[override]]
#   name = "github.com/x/y"
#   version = "2.4.0"
#
# [prune]
#   non-go = false
#   go-tests = true
#   unused-packages = true
#
###########################################################
# NOTE: All packages should be pinned to specific versions.
# Packages without releases must pin to a commit.


[[constraint]]
  name = "github.com/go-kit/kit"
  version = "=0.6.0"

[[constraint]]
  name = "github.com/gogo/protobuf"
  version = "=1.1.1"

[[constraint]]
  name = "github.com/golang/protobuf"
  version = "=1.1.0"

[[constraint]]
  name = "github.com/gorilla/websocket"
  version = "=1.2.0"

[[constraint]]
  name = "github.com/pkg/errors"
  version = "=0.8.0"

[[constraint]]
  name = "github.com/spf13/cobra"
  version = "=0.0.1"

[[constraint]]
  name = "github.com/spf13/viper"
  version = "=1.0.0"

[[constraint]]
  name = "github.com/stretchr/testify"
  version = "=1.2.1"

[[constraint]]
  name = "github.com/tendermint/go-amino"
  version = "v0.13.0"

[[constraint]]
  name = "google.golang.org/grpc"
  version = "=1.13.0"

[[constraint]]
  name = "github.com/fortytw2/leaktest"
  version = "=1.2.0"

###################################
## Some repos don't have releases.
## Pin to revision

[[constraint]]
  name = "golang.org/x/crypto"
  source = "github.com/tendermint/crypto"
  revision = "3764759f34a542a3aef74d6b02e35be7ab893bba"

[[override]]
  name = "github.com/jmhodges/levigo"
  revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9"

[[constraint]]
  name = "github.com/ebuchman/fail-test"
  revision = "95f809107225be108efcf10a3509e4ea6ceef3c4"

# last revision used by go-crypto
[[constraint]]
  name = "github.com/btcsuite/btcutil"
  revision = "d4cc87b860166d00d6b5b9e0d3b3d71d6088d4d4"

[[constraint]]
  name = "github.com/tendermint/btcd"
  revision = "e5840949ff4fff0c56f9b6a541e22b63581ea9df"

# Haven't made a release since 2016.
[[constraint]]
  name = "github.com/prometheus/client_golang"
  revision = "ae27198cdd90bf12cd134ad79d1366a6cf49f632"

[[constraint]]
  name = "github.com/rcrowley/go-metrics"
  revision = "e2704e165165ec55d062f5919b4b29494e9fa790"

[[constraint]]
  name = "golang.org/x/net"
  revision = "292b43bbf7cb8d35ddf40f8d5100ef3837cced3f"

[prune]
  go-tests = true
  unused-packages = true
Makefile (259 lines)
@@ -1,45 +1,42 @@
GOTOOLS = \
	github.com/mitchellh/gox \
	github.com/golangci/golangci-lint/cmd/golangci-lint \
	github.com/golang/dep/cmd/dep \
	github.com/alecthomas/gometalinter \
	github.com/gogo/protobuf/protoc-gen-gogo \
	github.com/square/certstrap
GOBIN?=${GOPATH}/bin
PACKAGES=$(shell go list ./...)
OUTPUT?=build/tendermint

INCLUDE = -I=. -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf
BUILD_TAGS?='tendermint'
LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD` -s -w
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`"

LINT_FLAGS = --exclude '.*\.pb\.go' --exclude 'vendor/*' --vendor --deadline=600s

all: check build test install

# The below include contains the tools.
include tools.mk
include tests.mk
check: check_tools get_vendor_deps

########################################
### Build Tendermint

build:
	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/
	CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/

build_c:
	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" -o $(OUTPUT) ./cmd/tendermint/
	CGO_ENABLED=1 go build $(BUILD_FLAGS) -tags "$(BUILD_TAGS) gcc" -o build/tendermint ./cmd/tendermint/

build_race:
	CGO_ENABLED=1 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint
	CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint

install:
	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint

install_c:
	CGO_ENABLED=1 go install $(BUILD_FLAGS) -tags "$(BUILD_TAGS) cleveldb" ./cmd/tendermint
	CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags $(BUILD_TAGS) ./cmd/tendermint

########################################
### Protobuf

protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc protoc_proto3types
protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc

%.pb.go: %.proto
	## If you get the following error,
@@ -55,13 +52,11 @@ protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc protoc_proto3types
	# see protobuf section above
protoc_abci: abci/types/types.pb.go

protoc_proto3types: types/proto3/block.pb.go

build_abci:
	@go build -mod=readonly -i ./abci/cmd/...
	@go build -i ./abci/cmd/...

install_abci:
	@go install -mod=readonly ./abci/cmd/...
	@go install ./abci/cmd/...

########################################
### Distribution

@@ -71,6 +66,29 @@ install_abci:
dist:
	@BUILD_TAGS=$(BUILD_TAGS) sh -c "'$(CURDIR)/scripts/dist.sh'"

########################################
### Tools & dependencies

check_tools:
	@# https://stackoverflow.com/a/25668869
	@echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\
	$(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))"

get_tools:
	@echo "--> Installing tools"
	./scripts/get_tools.sh
	@echo "--> Downloading linters (this may take a while)"
	$(GOPATH)/src/github.com/alecthomas/gometalinter/scripts/install.sh -b $(GOBIN)

update_tools:
	@echo "--> Updating tools"
	./scripts/get_tools.sh

# Update dependencies
get_vendor_deps:
	@echo "--> Running dep"
	@dep ensure

# For ABCI and libs
get_protoc:
	@# https://github.com/google/protobuf/releases
@@ -84,16 +102,6 @@ get_protoc:
	cd .. && \
	rm -rf protobuf-3.6.1

go-mod-cache: go.sum
	@echo "--> Download go modules to local cache"
	@go mod download
.PHONY: go-mod-cache

go.sum: go.mod
	@echo "--> Ensure dependencies have not been modified"
	@go mod verify
	@go mod tidy

draw_deps:
	@# requires brew install graphviz or apt-get install graphviz
	go get github.com/RobotsAndPencils/goviz
@@ -101,7 +109,7 @@ draw_deps:

get_deps_bin_size:
	@# Copy of build recipe with additional flags to perform binary size analysis
	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o $(OUTPUT) ./cmd/tendermint/ 2>&1))
	$(eval $(shell go build -work -a $(BUILD_FLAGS) -tags $(BUILD_TAGS) -o build/tendermint ./cmd/tendermint/ 2>&1))
	@find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log
	@echo "Results can be found here: $(CURDIR)/deps_bin_size.log"

@@ -110,36 +118,108 @@ get_deps_bin_size:

protoc_libs: libs/common/types.pb.go

# generates certificates for TLS testing in remotedb and RPC server
gen_certs: clean_certs
	## Generating certificates for TLS testing...
	certstrap init --common-name "tendermint.com" --passphrase ""
	certstrap request-cert --common-name "remotedb" -ip "127.0.0.1" --passphrase ""
	certstrap sign "remotedb" --CA "tendermint.com" --passphrase ""
	mv out/remotedb.crt libs/db/remotedb/test.crt
	mv out/remotedb.key libs/db/remotedb/test.key
	certstrap request-cert --common-name "server" -ip "127.0.0.1" --passphrase ""
	certstrap sign "server" --CA "tendermint.com" --passphrase ""
	mv out/server.crt rpc/lib/server/test.crt
	mv out/server.key rpc/lib/server/test.key
	rm -rf out
	certstrap request-cert -ip "::" --passphrase ""
	certstrap sign "::" --CA "tendermint.com" --passphrase ""
	mv out/::.crt out/::.key db/remotedb

# deletes generated certificates
clean_certs:
	rm -f libs/db/remotedb/test.crt
	rm -f libs/db/remotedb/test.key
	rm -f rpc/lib/server/test.crt
	rm -f rpc/lib/server/test.key
	## Cleaning TLS testing certificates...
	rm -rf out
	rm -f db/remotedb/::.crt db/remotedb/::.key

test_libs:
	go test -tags clevedb boltdb $(PACKAGES)
test_libs: gen_certs
	GOCACHE=off go test -tags gcc $(PACKAGES)
	make clean_certs

grpc_dbserver:
	protoc -I libs/db/remotedb/proto/ libs/db/remotedb/proto/defs.proto --go_out=plugins=grpc:libs/db/remotedb/proto
	protoc -I db/remotedb/proto/ db/remotedb/proto/defs.proto --go_out=plugins=grpc:db/remotedb/proto

protoc_grpc: rpc/grpc/types.pb.go

protoc_merkle: crypto/merkle/merkle.pb.go

########################################
### Testing

## required to be run first by most tests
build_docker_test_image:
	docker build -t tester -f ./test/docker/Dockerfile .

### coverage, app, persistence, and libs tests
test_cover:
	# run the go unit tests with coverage
	bash test/test_cover.sh

test_apps:
	# run the app tests using bash
	# requires `abci-cli` and `tendermint` binaries installed
	bash test/app/test.sh

test_abci_apps:
	bash abci/tests/test_app/test.sh

test_abci_cli:
	# test the cli against the examples in the tutorial at:
	# ./docs/abci-cli.md
	# if test fails, update the docs ^
	@ bash abci/tests/test_cli/test.sh

test_persistence:
	# run the persistence tests using bash
	# requires `abci-cli` installed
	docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh

	# TODO undockerize
	# bash test/persist/test_failure_indices.sh

test_p2p:
	docker rm -f rsyslog || true
	rm -rf test/logs || true
	mkdir test/logs
	cd test/
	docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog
	cd ..
	# requires 'tester' the image from above
	bash test/p2p/test.sh tester
	# the `docker cp` takes a really long time; uncomment for debugging
	#
	# mkdir -p test/p2p/logs && docker cp rsyslog:/var/log test/p2p/logs

test_integrations:
	make build_docker_test_image
	make get_tools
	make get_vendor_deps
	make install
	make test_cover
	make test_apps
	make test_abci_apps
	make test_abci_cli
	make test_libs
	make test_persistence
	make test_p2p

test_release:
	@go test -tags release $(PACKAGES)

test100:
	@for i in {1..100}; do make test; done

vagrant_test:
	vagrant up
	vagrant ssh -c 'make test_integrations'

### go tests
test:
	@echo "--> Running go test"
	@GOCACHE=off go test -p 1 $(PACKAGES)

test_race:
	@echo "--> Running go test --race"
	@GOCACHE=off go test -p 1 -v -race $(PACKAGES)


########################################
### Formatting, linting, and vetting

@@ -147,17 +227,54 @@ protoc_merkle: crypto/merkle/merkle.pb.go
fmt:
	@go fmt ./...

lint:
metalinter:
	@echo "--> Running linter"
	@golangci-lint run
	@gometalinter $(LINT_FLAGS) --disable-all \
		--enable=deadcode \
		--enable=gosimple \
		--enable=misspell \
		--enable=safesql \
		./...
		#--enable=gas \
		#--enable=maligned \
		#--enable=dupl \
		#--enable=errcheck \
		#--enable=goconst \
		#--enable=gocyclo \
		#--enable=goimports \
		#--enable=golint \ <== comments on anything exported
		#--enable=gotype \
		#--enable=ineffassign \
		#--enable=interfacer \
		#--enable=megacheck \
		#--enable=staticcheck \
		#--enable=structcheck \
		#--enable=unconvert \
		#--enable=unparam \
		#--enable=unused \
		#--enable=varcheck \
		#--enable=vet \
		#--enable=vetshadow \

metalinter_all:
	@echo "--> Running linter (all)"
	gometalinter $(LINT_FLAGS) --enable-all --disable=lll ./...

DESTINATION = ./index.html.md

rpc-docs:
	cat rpc/core/slate_header.txt > $(DESTINATION)
	godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's,/src/target,https://github.com/tendermint/tendermint/tree/master/rpc/core,' >> $(DESTINATION)

check_dep:
	dep status >> /dev/null
	!(grep -n branch Gopkg.toml)

###########################################################
### Docker image

build-docker:
	cp $(OUTPUT) DOCKER/tendermint
	cp build/tendermint DOCKER/tendermint
	docker build --label=tendermint --tag="tendermint/tendermint" DOCKER
	rm -rf DOCKER/tendermint

@@ -165,22 +282,16 @@ build-docker:
### Local testnet using docker

# Build linux binary on other platforms
build-linux: tools
build-linux:
	GOOS=linux GOARCH=amd64 $(MAKE) build

build-docker-localnode:
	@cd networks/local && make

# Runs `make build_c` from within an Amazon Linux (v2)-based Docker build
# container in order to build an Amazon Linux-compatible binary. Produces a
# compatible binary at ./build/tendermint
build_c-amazonlinux:
	$(MAKE) -C ./DOCKER build_amazonlinux_buildimage
	docker run --rm -it -v `pwd`:/tendermint tendermint/tendermint:build_c-amazonlinux
	cd networks/local
	make

# Run a 4-node testnet locally
localnet-start: localnet-stop build-docker-localnode
	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --config /etc/tendermint/config-template.toml --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2; fi
localnet-start: localnet-stop
	@if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi
	docker-compose up

# Stop testnet
@@ -207,27 +318,11 @@ sentry-stop:
	@if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi
	cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub"

# Build hooks for dredd, to skip or add information on some steps
build-contract-tests-hooks:
ifeq ($(OS),Windows_NT)
	go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests.exe ./cmd/contract_tests
else
	go build -mod=readonly $(BUILD_FLAGS) -o build/contract_tests ./cmd/contract_tests
endif

# Run a nodejs tool to test endpoints against a localnet
# The command takes care of starting and stopping the network
# prerequisites: build-contract-tests-hooks build-linux
# the two build commands were not added to let this command run from generic containers or machines.
# The binaries should be built beforehand
contract-tests:
	dredd
# meant for the CI, inspect script & adapt accordingly
build-slate:
	bash scripts/slate.sh

# To avoid unintended conflicts with file names, always add to .PHONY
# unless there is a reason not to.
# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html
.PHONY: check build build_race build_abci dist install install_abci check_tools tools update_tools draw_deps \
	get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver fmt rpc-docs build-linux localnet-start \
	localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop protoc_grpc protoc_all \
	build_c install_c test_with_deadlock cleanup_after_test_with_deadlock lint build-contract-tests-hooks contract-tests \
	build_c-amazonlinux
.PHONY: check build build_race build_abci dist install install_abci check_dep check_tools get_tools update_tools get_vendor_deps draw_deps get_protoc protoc_abci protoc_libs gen_certs clean_certs grpc_dbserver test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt rpc-docs build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate protoc_grpc protoc_all
PHILOSOPHY.md (158 lines, deleted)
@@ -1,158 +0,0 @@
## Design goals

The design goals for Tendermint (and the SDK and related libraries) are:

* Simplicity and Legibility
* Parallel performance, namely ability to utilize multicore architecture
* Ability to evolve the codebase bug-free
* Debuggability
* Complete correctness that considers all edge cases, esp in concurrency
* Future-proof modular architecture, message protocol, APIs, and encapsulation

### Justification

Legibility is key to maintaining bug-free software as it evolves toward more
optimizations, more ease of debugging, and additional features.

It is too easy to introduce bugs over time by replacing lines of code with
those that may panic, which means ideally locks are unlocked by defer
statements.

For example,

```go
func (obj *MyObj) something() {
	mtx.Lock()
	obj.something = other
	mtx.Unlock()
}
```

It is too easy to refactor the codebase in the future to replace `other` with
`other.String()` for example, and this may introduce a bug that causes a
deadlock. So as much as reasonably possible, we need to be using defer
statements, even though it introduces additional overhead.

If it is necessary to optimize the unlocking of mutex locks, the solution is
more modularity via smaller functions, so that defer'd unlocks are scoped
within a smaller function.
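For illustration, here is the defer'd version of the same hypothetical `MyObj`
example, scoped inside a small function as recommended above (a minimal,
self-contained sketch):

```go
package main

import "sync"

var mtx sync.Mutex

type MyObj struct {
	something string
}

// set scopes the defer'd unlock inside a small function: the lock is
// released when the function returns, even if the body panics, so a
// future refactor of the body cannot silently introduce a deadlock.
func (obj *MyObj) set(other string) {
	mtx.Lock()
	defer mtx.Unlock()
	obj.something = other
}

func main() {
	obj := new(MyObj)
	obj.set("other")
}
```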
Similarly, idiomatic for-loops should always be preferred over those that use
custom counters, because it is too easy to evolve the body of a for-loop to
become more complicated over time, and it becomes more and more difficult to
assess the correctness of such a for-loop by visual inspection.

### On performance

It doesn't matter whether there are alternative implementations that are 2x or
3x more performant when the software doesn't work, deadlocks, or cannot be
debugged. By taking advantage of multicore concurrency, the Tendermint
implementation will at least be within an order of magnitude of what is
theoretically possible. The design philosophy of Tendermint, and the choice of
Go as the implementation language, is meant to make the Tendermint
implementation the standard specification for concurrent BFT software.

By focusing on the message protocols (e.g. ABCI, p2p messages) and on
encapsulation (e.g. the IAVL module, (relatively) independent reactors), we are
both implementing a standard to be used as the specification for future
implementations in more optimizable languages like Rust, Java, and C++, and
creating sufficiently performant software. Tendermint Core will never be as
fast as future implementations of the Tendermint Spec, because Go isn't
designed to be as fast as possible. The advantage of using Go is that we can
develop the whole stack of modular components **faster** than in other
languages.

Furthermore, the real bottleneck is in the application layer, and it isn't
necessary to support more than a sufficiently decentralized set of validators
(e.g. 100 ~ 300 validators is sufficient, with delegated bonded PoS).

Instead of optimizing Tendermint performance down to the metal, let's focus on
optimizing other matters, namely the ability to push feature-complete software
that works well enough, can be debugged and maintained, and can serve as a spec
for future implementations.

### On encapsulation

In order to create maintainable, forward-optimizable software, it is critical
to develop well-encapsulated objects that have well understood properties, and
to re-use these easy-to-use-correctly components as building blocks for further
encapsulated meta-objects.

For example, mutexes are cheap enough for Tendermint's design goals when there
isn't goroutine contention, so it is encouraged to create concurrency-safe
structures with struct-level mutexes. If they are used in the context of
non-concurrent logic, then the performance is good enough. If they are used in
the context of concurrent logic, then they will still perform correctly.

Examples of this design principle can be seen in the types.ValidatorSet struct
and the cmn.Rand struct. Each is a single struct declaration that can be used
in both concurrent and non-concurrent logic, and because it is well
encapsulated, it's easy to get the usage of the mutex right.

#### example: cmn.Rand:

`The default Source is safe for concurrent use by multiple goroutines, but
Sources created by NewSource are not`. The reason why the default
package-level source is safe for concurrent use is because it is protected (see
`lockedSource` in https://golang.org/src/math/rand/rand.go).

But we shouldn't rely on the global source; we should be creating our own
Rand/Source instances and using them, especially for determinism in testing.
So it is reasonable to have cmn.Rand be protected by a mutex. Whether we want
our own implementation of Rand is another question, but the answer there is
also in the affirmative. Sometimes you want to know where Rand is being used
in your code, so it becomes a simple matter of dropping in a log statement to
inject inspectability into Rand usage. Also, it is nice to be able to extend
the functionality of Rand with custom methods. For these reasons, and for the
reasons outlined in this design philosophy document, we should
continue to use the cmn.Rand object, with mutex protection.
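As a rough sketch of the pattern being described (the names below are
illustrative, not the actual cmn.Rand API), a struct-level mutex guarding a
non-concurrency-safe source looks like this:

```go
package main

import (
	"fmt"
	"math/rand"
	"sync"
)

// Rand wraps a source that is not safe for concurrent use with a
// struct-level mutex, mirroring the stdlib's lockedSource: correct
// under concurrent logic, cheap enough under non-concurrent logic.
type Rand struct {
	mtx sync.Mutex
	src *rand.Rand
}

func NewRand(seed int64) *Rand {
	return &Rand{src: rand.New(rand.NewSource(seed))}
}

func (r *Rand) Int63() int64 {
	r.mtx.Lock()
	defer r.mtx.Unlock()
	return r.src.Int63()
}

func main() {
	r := NewRand(1) // explicit seed: deterministic, handy for tests
	fmt.Println(r.Int63())
}
```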
Another key aspect of good encapsulation is the choice of exposed vs unexposed
methods. It should be clear to the reader of the code which methods are
intended to be used in what context, and what safe usage is. Part of this is
solved by leaving methods unexported. Another part of this is naming
conventions on the methods (e.g. underscores) with good documentation, and
code organization. If there are too many exposed methods and it isn't clear
which methods have what side effects, then there is something wrong about the
design of abstractions and it should be revisited.

### On concurrency

In order for Tendermint to remain relevant in the years to come, it is vital
for Tendermint to take advantage of multicore architectures. Due to the nature
of the problem, namely consensus across a concurrent p2p gossip network, and to
handle RPC requests for a large number of consuming subscribers, it is
unavoidable for Tendermint development to require expertise in concurrency
design, especially when it comes to the reactor design, and also for RPC
request handling.

## Guidelines

Here are some guidelines for designing for (sufficient) performance and concurrency:

* Mutex locks are cheap enough when there isn't contention.
* Do not optimize code without analytical or observed proof that it is in a hot path.
* Don't over-use channels when mutex locks w/ encapsulation are sufficient.
* The need to drain channels is often a hint of unconsidered edge cases.
* The creation of O(N) one-off goroutines is generally technical debt that
  needs to get addressed sooner rather than later. Avoid creating too many
  goroutines as a patch around incomplete concurrency design, or at least be
  aware of the debt and do not invest in the debt. On the other hand, Tendermint
  is designed to have a limited number of peers (e.g. 10 or 20), so the creation
  of O(C) goroutines per O(P) peers is still O(C\*P) = constant.
* Use defer statements to unlock as much as possible. If you want to unlock sooner,
  try to create more modular functions that do make use of defer statements.

## Mantras

* Premature optimization kills
* Readability is paramount
* Beautiful is better than fast.
* In the face of ambiguity, refuse the temptation to guess.
* In the face of bugs, refuse the temptation to cover the bug.
* There should be one-- and preferably only one --obvious way to do it.
README.md (144 lines)
@@ -1,21 +1,23 @@
# Tendermint

![banner](docs/tendermint-core-image.jpg)

[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance)
[State Machines](https://en.wikipedia.org/wiki/State_machine_replication).
Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for short.
[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication).
Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short.

[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://godoc.org/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.12.0-blue.svg)](https://github.com/moovweb/gvm)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://godoc.org/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.10.4-blue.svg)](https://github.com/moovweb/gvm)
[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)

| Branch | Tests | Coverage |
| ------ | ----- | -------- |
| master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) |

Branch | Tests | Coverage
----------|-------|----------
master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)
develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint)

Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
@@ -25,66 +27,83 @@ For protocol details, see [the specification](/docs/spec).
For detailed analysis of the consensus protocol, including safety and liveness proofs,
see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)".

## Releases
## A Note on Production Readiness

NOTE: The master branch is now an active development branch (starting with `v0.32`). Please, do not depend on it and
use [releases](https://github.com/tendermint/tendermint/releases) instead.

Tendermint is being used in production in both private and public environments,
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).
While Tendermint is being used in production in private, permissioned
environments, we are still working actively to harden and audit it in preparation
for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/).
We are also still making breaking changes to the protocol and the APIs.
Thus, we tag the releases as *alpha software*.

In any case, if you intend to run Tendermint in production,
please [contact us](mailto:partners@tendermint.com) and [join the chat](https://riot.im/app/#/room/#tendermint:matrix.org).
please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :)

## Security

To report a security vulnerability, see our [bug bounty
program](https://hackerone.com/tendermint)
program](https://tendermint.com/security).

For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md)

## Minimum requirements

| Requirement | Notes |
| ----------- | ------------------ |
| Go version | Go1.11.4 or higher |
Requirement|Notes
---|---
Go version | Go1.10 or higher

## Documentation

Complete documentation can be found on the [website](https://tendermint.com/docs/).

### Install
## Install

See the [install instructions](/docs/introduction/install.md)

### Quick Start
## Quick Start

- [Single node](/docs/introduction/quick-start.md)
- [Local cluster using docker-compose](/docs/networks/docker-compose.md)
- [Single node](/docs/tendermint-core/using-tendermint.md)
- [Local cluster using docker-compose](/networks/local)
- [Remote cluster using terraform and ansible](/docs/networks/terraform-and-ansible.md)
- [Join the Cosmos testnet](https://cosmos.network/testnet)

## Resources

### Tendermint Core

For details about the blockchain data structures and the p2p protocols, see
the [Tendermint specification](/docs/spec).

For details on using the software, see the [documentation](/docs/) which is also
hosted at: https://tendermint.com/docs/

### Tools

Benchmarking and monitoring are provided by `tm-bench` and `tm-monitor`, respectively.
Their code is found [here](/tools) and these binaries need to be built separately.
Additional documentation is found [here](/docs/tools).

### Sub-projects

* [Amino](http://github.com/tendermint/go-amino), a reflection-based improvement on proto3
* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation

### Applications

* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
* [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
* [Many more](https://tendermint.com/ecosystem)

### Research

* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
* [Blog](https://blog.cosmos.network/tendermint/home)

## Contributing

Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions,
and the [contributing guidelines](CONTRIBUTING.md) when submitting code.

Join the larger community on the [forum](https://forum.cosmos.network/) and the [chat](https://riot.im/app/#/room/#tendermint:matrix.org).

To learn more about the structure of the software, watch the [Developer
Sessions](/docs/DEV_SESSIONS.md) and read some [Architectural Decision
Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture).

Learn more by reading the code and comparing it to the
[specification](https://github.com/tendermint/tendermint/tree/develop/docs/spec).
Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md).

## Versioning

### Semantic Versioning
### SemVer

Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes.
According to SemVer, anything in the public API can change at any time before version 1.0.0

To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
@@ -95,7 +114,6 @@ include the in-process Go APIs.
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:

- crypto
- types
- rpc/client
- config
@@ -122,40 +140,8 @@ data into the new chain.
However, any bump in the PATCH version should be compatible with existing histories
(if not please open an [issue](https://github.com/tendermint/tendermint/issues)).

For more information on upgrading, see [UPGRADING.md](./UPGRADING.md)
For more information on upgrading, see [here](./UPGRADING.md)

## Resources
## Code of Conduct

### Tendermint Core

For details about the blockchain data structures and the p2p protocols, see the
[Tendermint specification](/docs/spec).

For details on using the software, see the [documentation](/docs/) which is also
hosted at: https://tendermint.com/docs/

### Tools

Benchmarking and monitoring are provided by `tm-bench` and `tm-monitor`, respectively.
Their code is found [here](/tools) and these binaries need to be built separately.
Additional documentation is found [here](/docs/tools).

### Sub-projects

- [Amino](http://github.com/tendermint/go-amino), reflection-based proto3, with
  interfaces
- [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation
- [Tm-cmn](http://github.com/tendermint/tm-cmn), Commonly used libs across Tendermint & Cosmos repos

### Applications

- [Cosmos SDK](http://github.com/cosmos/cosmos-sdk); a cryptocurrency application framework
- [Ethermint](http://github.com/cosmos/ethermint); Ethereum on Tendermint
- [Many more](https://tendermint.com/ecosystem)

### Research

- [The latest gossip on BFT consensus](https://arxiv.org/abs/1807.04938)
- [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769)
- [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf)
- [Blog](https://blog.cosmos.network/tendermint/home)
Please read, understand and adhere to our [code of conduct](CODE_OF_CONDUCT.md).
ROADMAP.md (new file, 23 lines)
@@ -0,0 +1,23 @@
# Roadmap

BREAKING CHANGES:
- Better support for injecting randomness
- Upgrade consensus for more real-time use of evidence

FEATURES:
- Use the chain as its own CA for nodes and validators
- Tooling to run multiple blockchains/apps, possibly in a single process
- State syncing (without transaction replay)
- Add authentication and rate-limiting to the RPC

IMPROVEMENTS:
- Improve subtleties around mempool caching and logic
- Consensus optimizations:
  - cache block parts for faster agreement after round changes
  - propagate block parts rarest first
- Better testing of the consensus state machine (i.e. use a DSL)
- Auto-compiled serialization/deserialization code instead of go-wire reflection

BUG FIXES:
- Graceful handling/recovery for apps that have non-determinism or fail to halt
- Graceful handling/recovery for violations of safety, or liveness
@@ -1,8 +1,7 @@

# Security

As part of our [Coordinated Vulnerability Disclosure
Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/tendermint).
Policy](https://tendermint.com/security), we operate a bug bounty.
See the policy for more details on submissions and rewards.

Here is a list of examples of the kinds of bugs we're most interested in:
UPGRADING.md (306 lines)
@@ -3,303 +3,9 @@
|
||||
This guide provides steps to be followed when you upgrade your applications to
|
||||
a newer version of Tendermint Core.
|
||||
|
||||
## v0.32.0
|
||||
|
||||
This release is compatible with previous blockchains,
|
||||
however the new ABCI Events mechanism may create some complexity
|
||||
for nodes wishing to continue operation with v0.32 from a previous version.
|
||||
There are some minor breaking changes to the RPC.
|
||||
|
||||
### Config Changes
|
||||
|
||||
If you have `db_backend` set to `leveldb` in your config file, please change it
|
||||
to `goleveldb` or `cleveldb`.
|
||||
|
||||
### RPC Changes
|
||||
|
||||
The default listen address for the RPC is now `127.0.0.1`. If you want to expose
|
||||
it publicly, you have to explicitly configure it. Note exposing the RPC to the
|
||||
public internet may not be safe - endpoints which return a lot of data may
|
||||
enable resource exhaustion attacks on your node, causing the process to crash.
|
||||
|
||||
Any consumers of `/block_results` need to be mindful of the change in all field
|
||||
names from CamelCase to Snake case, eg. `results.DeliverTx` is now `results.deliver_tx`.
|
||||
This is a fix, but it's breaking.
|
||||
|
||||
### ABCI Changes
|
||||
|
||||
ABCI responses which previously had a `Tags` field now have an `Events` field
|
||||
instead. The original `Tags` field was simply a list of key-value pairs, where
|
||||
each key effectively represented some attribute of an event occuring in the
|
||||
blockchain, like `sender`, `receiver`, or `amount`. However, it was difficult to
|
||||
represent the occurence of multiple events (for instance, multiple transfers) in a single list.
|
||||
The new `Events` field contains a list of `Event`, where each `Event` is itself a list
|
||||
of key-value pairs, allowing for more natural expression of multiple events in
|
||||
eg. a single DeliverTx or EndBlock. Note each `Event` also includes a `Type`, which is meant to categorize the
|
||||
event.
|
||||
|
||||
For transaction indexing, the index key is
|
||||
prefixed with the event type: `{eventType}.{attributeKey}`.
|
||||
If the same event type and attribute key appear multiple times, the values are
|
||||
appended in a list.
|
||||
|
||||
To make queries, include the event type as a prefix. For instance if you
|
||||
previously queried for `recipient = 'XYZ'`, and after the upgrade you name your event `transfer`,
|
||||
the new query would be for `transfer.recipient = 'XYZ'`.
|
||||
|
||||
Note that transactions indexed on a node before upgrading to v0.32 will still be indexed
|
||||
using the old scheme. For instance, if a node upgraded at height 100,
|
||||
transactions before 100 would be queried with `recipient = 'XYZ'` and
|
||||
transactions after 100 would be queried with `transfer.recipient = 'XYZ'`.
|
||||
While this presents additional complexity to clients, it avoids the need to
|
||||
reindex. Of course, you can reset the node and sync from scratch to re-index
|
||||
entirely using the new scheme.
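
A client that must span the upgrade can branch on the height at which the node
switched schemes. A minimal sketch (the helper name and the `XYZ` value are
illustrative, not part of any Tendermint API):

```go
// recipientQuery picks the query form for a tx indexed before or after
// the node's upgrade height (old scheme vs. event-type-prefixed scheme).
func recipientQuery(txHeight, upgradeHeight int64) string {
    if txHeight < upgradeHeight {
        return "recipient = 'XYZ'"
    }
    return "transfer.recipient = 'XYZ'"
}
```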

We illustrate further with a more complete example.

Prior to the update, suppose your `ResponseDeliverTx` looked like:

```go
abci.ResponseDeliverTx{
    Tags: []cmn.KVPair{
        {Key: []byte("sender"), Value: []byte("foo")},
        {Key: []byte("recipient"), Value: []byte("bar")},
        {Key: []byte("amount"), Value: []byte("35")},
    },
}
```

The following queries would match this transaction:

```go
query.MustParse("tm.event = 'Tx' AND sender = 'foo'")
query.MustParse("tm.event = 'Tx' AND recipient = 'bar'")
query.MustParse("tm.event = 'Tx' AND sender = 'foo' AND recipient = 'bar'")
```

Following the upgrade, your `ResponseDeliverTx` would look something like:
the following `Events`:

```go
abci.ResponseDeliverTx{
    Events: []abci.Event{
        {
            Type: "transfer",
            Attributes: cmn.KVPairs{
                {Key: []byte("sender"), Value: []byte("foo")},
                {Key: []byte("recipient"), Value: []byte("bar")},
                {Key: []byte("amount"), Value: []byte("35")},
            },
        },
    },
}
```

Now the following queries would match this transaction:

```go
query.MustParse("tm.event = 'Tx' AND transfer.sender = 'foo'")
query.MustParse("tm.event = 'Tx' AND transfer.recipient = 'bar'")
query.MustParse("tm.event = 'Tx' AND transfer.sender = 'foo' AND transfer.recipient = 'bar'")
```

For further documentation on `Events`, see the [docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events).

### Go Applications

The ABCI Application interface changed slightly so the CheckTx and DeliverTx
methods now take Request structs. The contents of these structs are just the raw
tx bytes, which were previously passed in as the argument.
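
As a sketch of the migration (the app type is hypothetical; field and constant
names follow the ABCI types used elsewhere in this diff):

```go
// Before v0.32 the method took the raw bytes directly:
//   func (app *MyApp) CheckTx(tx []byte) types.ResponseCheckTx
// Afterwards the same bytes arrive wrapped in a Request struct:
func (app *MyApp) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
    tx := req.Tx // identical to the old argument
    _ = tx       // ... validate as before ...
    return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
```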

## v0.31.6

There are no breaking changes in this release except the Go API of the p2p and
mempool packages. However, if you're using cleveldb, you'll need to change
the compilation tag:

Use the `cleveldb` tag instead of `gcc` to compile Tendermint with CLevelDB, or
use `make build_c` / `make install_c` (full instructions can be found at
https://tendermint.com/docs/introduction/install.html#compile-with-cleveldb-support)
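
For reference, a Go source file guarded by the new tag would look roughly like
this (package name illustrative); without `-tags cleveldb`, which `make build_c`
is expected to supply, the file is simply skipped by the compiler:

```go
// +build cleveldb

package db

// Code here compiles only when the cleveldb build tag is set,
// e.g. `go build -tags cleveldb ./...`.
```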

## v0.31.0

This release contains a breaking change to the behaviour of the pubsub system.
It also contains some minor breaking changes in the Go API and ABCI.
There are no changes to the block or p2p protocols, so v0.31.0 should work fine
with blockchains created from the v0.30 series.

### RPC

The pubsub no longer blocks on publishing. This may cause some WebSocket (WS) clients to stop working as expected.
If your WS client is not consuming events fast enough, Tendermint can terminate the subscription.
In this case, the WS client will receive an error with description:

```json
{
    "jsonrpc": "2.0",
    "id": "{ID}#event",
    "error": {
        "code": -32000,
        "msg": "Server error",
        "data": "subscription was cancelled (reason: client is not pulling messages fast enough)" // or "subscription was cancelled (reason: Tendermint exited)"
    }
}
```

Additionally, there are now limits on the number of subscribers and
subscriptions that can be active at once. See the new
`rpc.max_subscription_clients` and `rpc.max_subscriptions_per_client` values to
configure this.

### Applications

Simple rename of `ConsensusParams.BlockSize` to `ConsensusParams.Block`.

The `ConsensusParams.Block.TimeIotaMS` field was also removed. It's configured
in the ConsensusParams in genesis.

### Go API

See the [CHANGELOG](CHANGELOG.md). These are relatively straightforward.

## v0.30.0

This release contains a breaking change to both the block and p2p protocols,
however it may be compatible with blockchains created with v0.29.0 depending on
the chain history. If your blockchain has not included any pieces of evidence,
or no piece of evidence has been included in more than one block,
and if your application has never returned multiple updates
for the same validator in a single block, then v0.30.0 will work fine with
blockchains created with v0.29.0.

The p2p protocol change is to fix the proposer selection algorithm again.
Note that proposer selection is purely a p2p concern right
now since the algorithm is only relevant during real time consensus.
This change is thus compatible with v0.29.0, but
all nodes must be upgraded to avoid disagreements on the proposer.

### Applications

Applications must ensure they do not return duplicates in
`ResponseEndBlock.ValidatorUpdates`. A pubkey must only appear once per set of
updates. Duplicates will cause irrecoverable failure. If you have a very good
reason why we shouldn't do this, please open an issue.

## v0.29.0

This release contains some breaking changes to the block and p2p protocols,
and will not be compatible with any previous versions of the software, primarily
due to changes in how various data structures are hashed.

Any implementations of Tendermint blockchain verification, including lite clients,
will need to be updated. For specific details:

- [Merkle tree](./docs/spec/blockchain/encoding.md#merkle-trees)
- [ConsensusParams](./docs/spec/blockchain/state.md#consensusparams)

There was also a small change to field ordering in the vote struct. Any
implementations of an out-of-process validator (like a Key-Management Server)
will need to be updated. For specific details:

- [Vote](https://github.com/tendermint/tendermint/blob/develop/docs/spec/consensus/signing.md#votes)

Finally, the proposer selection algorithm continues to evolve. See the
[work-in-progress
specification](https://github.com/tendermint/tendermint/pull/3140).

For everything else, please see the [CHANGELOG](./CHANGELOG.md#v0.29.0).

## v0.28.0

This release breaks the format for the `priv_validator.json` file
and the protocol used for the external validator process.
It is compatible with v0.27.0 blockchains (neither the BlockProtocol nor the
P2PProtocol have changed).

Please read carefully for details about upgrading.

**Note:** Backup your `config/priv_validator.json`
before proceeding.

### `priv_validator.json`

The `config/priv_validator.json` is now two files:
`config/priv_validator_key.json` and `data/priv_validator_state.json`.
The former contains the key material, the latter contains the details on the last
message signed.

When running v0.28.0 for the first time, it will back up any pre-existing
`priv_validator.json` file and proceed to split it into the two new files.
Upgrading should happen automatically without problem.

To upgrade manually, use the provided `privValUpgrade.go` script, with exact paths for the old
`priv_validator.json` and the locations for the two new files. It's recommended
to use the default paths of `config/priv_validator_key.json` and
`data/priv_validator_state.json`, respectively:

```
go run scripts/privValUpgrade.go <old-path> <new-key-path> <new-state-path>
```

### External validator signers

The Unix and TCP implementations of the remote signing validator
have been consolidated into a single implementation.
Thus in both cases, the external process is expected to dial
Tendermint. This is different from how Unix sockets used to work, where
Tendermint dialed the external process.

The `PubKeyMsg` was also split into separate `Request` and `Response` types
for consistency with other messages.

Note that the TCP sockets don't yet use a persistent key,
so while they're encrypted, they can't yet be properly authenticated.
See [#3105](https://github.com/tendermint/tendermint/issues/3105).
Note the Unix socket has neither encryption nor authentication, but will
add a shared-secret in [#3099](https://github.com/tendermint/tendermint/issues/3099).

## v0.27.0

This release contains some breaking changes to the block and p2p protocols,
but does not change any core data structures, so it should be compatible with
existing blockchains from the v0.26 series that only used Ed25519 validator keys.
Blockchains using Secp256k1 for validators will not be compatible. This is due
to the fact that we now enforce which key types validators can use as a
consensus param. The default is Ed25519, and Secp256k1 must be activated
explicitly.

It is recommended to upgrade all nodes at once to avoid incompatibilities at the
peer layer - namely, the heartbeat consensus message has been removed (only
relevant if `create_empty_blocks=false` or `create_empty_blocks_interval > 0`),
and the proposer selection algorithm has changed. Since proposer information is
never included in the blockchain, this change only affects the peer layer.

### Go API Changes

#### libs/db

The ReverseIterator API has changed the meaning of `start` and `end`.
Before, iteration was from `start` to `end`, where
`start > end`. Now, iteration is from `end` to `start`, where `start < end`.
The iterator also excludes `end`. This change allows a simplified and more
intuitive logic, aligning the semantic meaning of `start` and `end` in the
`Iterator` and `ReverseIterator`.
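
A rough sketch of the new semantics (the `db` value and key literals are
illustrative; see libs/db for the exact interface):

```go
// Visits keys from just below "z" down to "a"; "z" itself is excluded,
// per the new contract where start < end and end is exclusive.
it := db.ReverseIterator([]byte("a"), []byte("z"))
defer it.Close()
for ; it.Valid(); it.Next() {
    k, v := it.Key(), it.Value()
    _, _ = k, v
}
```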

### Applications

This release enforces a new consensus parameter, the
ValidatorParams.PubKeyTypes. Applications must ensure that they only return
validator updates with the allowed PubKeyTypes. If a validator update includes a
pubkey type that is not included in the ConsensusParams.Validator.PubKeyTypes,
block execution will fail and the consensus will halt.

By default, only Ed25519 pubkeys may be used for validators. Enabling
Secp256k1 requires explicit modification of the ConsensusParams.
Please update your application accordingly (ie. restrict validators to only be
able to use Ed25519 keys, or explicitly add additional key types to the genesis
file).
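
One defensive pattern is to drop disallowed updates before returning them from
EndBlock; a sketch (the helper is hypothetical, the `PubKey.Type` field follows
the ABCI types of this era):

```go
// filterUpdates keeps only validator updates whose pubkey type appears in
// the allowed set (mirroring ConsensusParams.Validator.PubKeyTypes).
func filterUpdates(updates []types.ValidatorUpdate, allowed map[string]bool) []types.ValidatorUpdate {
    out := make([]types.ValidatorUpdate, 0, len(updates))
    for _, u := range updates {
        if allowed[u.PubKey.Type] {
            out = append(out, u)
        }
    }
    return out
}
```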

## v0.26.0

This release contains a lot of changes to core data types and protocols. It is not
New 0.26.0 release contains a lot of changes to core data types. It is not
compatible with the old versions and there is no straightforward way to update
old data to be compatible with the new version.

@@ -327,7 +33,7 @@ to `prove`. To get proofs with your queries, ensure you set `prove=true`.

Various version fields like `amino_version`, `p2p_version`, `consensus_version`,
and `rpc_version` have been removed from the `node_info.other` and are
consolidated under the tendermint semantic version (ie. `node_info.version`) and
the new `block` and `p2p` protocol versions under `node_info.protocol_version`.
the new `block` and `p2p` protocol versions under `node_info.protocol_version`..

### ABCI Changes

@@ -339,7 +45,7 @@ protobuf file for these changes.

The `ResponseQuery.Proof` field is now structured as a `[]ProofOp` to support
generalized Merkle tree constructions where the leaves of one Merkle tree are
the root of another. If you don't need this functionality, and you used to
the root of another. If you don't need this functionaluty, and you used to
return `<proof bytes>` here, you should instead return a single `ProofOp` with
just the `Data` field set:

@@ -361,7 +67,7 @@ For more information, see:

### Go API Changes

#### crypto/merkle
#### crypto.merkle

The `merkle.Hasher` interface was removed. Functions which used to take `Hasher`
now simply take `[]byte`. This means that any objects being Merklized should be
@@ -373,10 +79,6 @@ The `node.RunForever` function was removed. Signal handling and running forever
should instead be explicitly configured by the caller. See how we do it
[here](https://github.com/tendermint/tendermint/blob/30519e8361c19f4bf320ef4d26288ebc621ad725/cmd/tendermint/commands/run_node.go#L60).

### Other

All hashes, except for public key addresses, are now 32-bytes.

## v0.25.0

This release has minimal impact.

Vagrantfile
@@ -29,14 +29,10 @@ Vagrant.configure("2") do |config|
    usermod -a -G docker vagrant

    # install go
    wget -q https://dl.google.com/go/go1.12.linux-amd64.tar.gz
    tar -xvf go1.12.linux-amd64.tar.gz
    wget -q https://dl.google.com/go/go1.11.linux-amd64.tar.gz
    tar -xvf go1.11.linux-amd64.tar.gz
    mv go /usr/local
    rm -f go1.12.linux-amd64.tar.gz

    # install nodejs (for docs)
    curl -sL https://deb.nodesource.com/setup_11.x | bash -
    apt-get install -y nodejs
    rm -f go1.11.linux-amd64.tar.gz

    # cleanup
    apt-get autoremove -y
@@ -57,6 +53,6 @@ Vagrant.configure("2") do |config|

  # get all deps and tools, ready to install/test
  su - vagrant -c 'source /home/vagrant/.bash_profile'
  su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make tools'
  su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps'
SHELL
end

abci/README.md
@@ -1,5 +1,7 @@

# Application BlockChain Interface (ABCI)

[CircleCI](https://circleci.com/gh/tendermint/abci)

Blockchains are systems for multi-master state machine replication.
**ABCI** is an interface that defines the boundary between the replication engine (the blockchain),
and the state machine (the application).
@@ -10,28 +12,160 @@ Previously, the ABCI was referred to as TMSP.

The community has provided a number of additional implementations, see the [Tendermint Ecosystem](https://tendermint.com/ecosystem)

## Installation & Usage

To get up and running quickly, see the [getting started guide](../docs/app-dev/getting-started.md) along with the [abci-cli documentation](../docs/app-dev/abci-cli.md) which will go through the examples found in the [examples](./example/) directory.

## Specification

A detailed description of the ABCI methods and message types is contained in:

- [The main spec](../docs/spec/abci/abci.md)
- [A protobuf file](./types/types.proto)
- [A Go interface](./types/application.go)
- [A prose specification](specification.md)
- [A protobuf file](https://github.com/tendermint/tendermint/blob/master/abci/types/types.proto)
- [A Go interface](https://github.com/tendermint/tendermint/blob/master/abci/types/application.go).

For more background information on ABCI, motivations, and tendermint, please visit [the documentation](https://tendermint.com/docs/).
The two guides to focus on are the `Application Development Guide` and `Using ABCI-CLI`.

## Protocol Buffers

To compile the protobuf file, run (from the root of the repo):
To compile the protobuf file, run:

```
make protoc_abci
cd $GOPATH/src/github.com/tendermint/tendermint/; make protoc_abci
```

See `protoc --help` and [the Protocol Buffers site](https://developers.google.com/protocol-buffers)
for details on compiling for other languages. Note we also include a [GRPC](https://www.grpc.io/docs)
for details on compiling for other languages. Note we also include a [GRPC](http://www.grpc.io/docs)
service definition.

## Install ABCI-CLI

The `abci-cli` is a simple tool for debugging ABCI servers and running some
example apps. To install it:

```
mkdir -p $GOPATH/src/github.com/tendermint
cd $GOPATH/src/github.com/tendermint
git clone https://github.com/tendermint/tendermint.git
cd tendermint
make get_tools
make get_vendor_deps
make install_abci
```

## Implementation

We provide three implementations of the ABCI in Go:

- Golang in-process
- ABCI-socket
- GRPC

Note the GRPC version is maintained primarily to simplify onboarding and prototyping and is not receiving the same
attention to security and performance as the others.

### In Process

The simplest implementation just uses function calls within Go.
This means ABCI applications written in Golang can be compiled with TendermintCore and run as a single binary.

See the [examples](#examples) below for more information.

### Socket (TSP)

ABCI is best implemented as a streaming protocol.
The socket implementation provides for asynchronous, ordered message passing over unix or tcp.
Messages are serialized using Protobuf3 and length-prefixed with a [signed Varint](https://developers.google.com/protocol-buffers/docs/encoding?csw=1#signed-integers).

For example, if the Protobuf3 encoded ABCI message is `0xDEADBEEF` (4 bytes), the length-prefixed message is `0x08DEADBEEF`, since `0x08` is the signed varint
encoding of `4`. If the Protobuf3 encoded ABCI message is 65535 bytes long, the length-prefixed message would be like `0xFEFF07...`.

Note the benefit of using this `varint` encoding over the old version (where integers were encoded as `<len of len><big endian len>`) is that
it is the standard way to encode integers in Protobuf. It is also generally shorter.
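
As a sanity check, the length-prefixing above can be reproduced with Go's
standard library, whose `binary.PutVarint` uses the same zigzag signed-varint
encoding (the helper itself is illustrative, not Tendermint's wire code):

```go
package main

import (
    "encoding/binary"
    "fmt"
)

// lengthPrefix prepends the signed-varint encoding of len(msg).
func lengthPrefix(msg []byte) []byte {
    buf := make([]byte, binary.MaxVarintLen64)
    n := binary.PutVarint(buf, int64(len(msg))) // zigzag signed varint
    return append(buf[:n], msg...)
}

func main() {
    // Prints "08 de ad be ef", matching the 0x08DEADBEEF example above.
    fmt.Printf("% x\n", lengthPrefix([]byte{0xDE, 0xAD, 0xBE, 0xEF}))
}
```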

### GRPC

GRPC is an rpc framework native to Protocol Buffers with support in many languages.
Implementing the ABCI using GRPC can allow for faster prototyping, but is expected to be much slower than
the ordered, asynchronous socket protocol. The implementation has also not received as much testing or review.

Note the length-prefixing used in the socket implementation does not apply for GRPC.

## Usage

The `abci-cli` tool wraps an ABCI client and can be used for probing/testing an ABCI server.
For instance, `abci-cli test` will run a test sequence against a listening server running the Counter application (see below).
It can also be used to run some example applications.
See [the documentation](https://tendermint.com/docs/) for more details.

### Examples

Check out the variety of example applications in the [example directory](example/).
It also contains the code referred to by the `counter` and `kvstore` apps; these apps come
built into the `abci-cli` binary.

#### Counter

The `abci-cli counter` application illustrates nonce checking in transactions. Its code looks like:

```golang
func cmdCounter(cmd *cobra.Command, args []string) error {

    app := counter.NewCounterApplication(flagSerial)

    logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

    // Start the listener
    srv, err := server.NewServer(flagAddrC, flagAbci, app)
    if err != nil {
        return err
    }
    srv.SetLogger(logger.With("module", "abci-server"))
    if err := srv.Start(); err != nil {
        return err
    }

    // Wait forever
    cmn.TrapSignal(func() {
        // Cleanup
        srv.Stop()
    })
    return nil
}
```

and can be found in [this file](cmd/abci-cli/abci-cli.go).

#### kvstore

The `abci-cli kvstore` application illustrates a simple key-value Merkle tree:

```golang
func cmdKVStore(cmd *cobra.Command, args []string) error {
    logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

    // Create the application - in memory or persisted to disk
    var app types.Application
    if flagPersist == "" {
        app = kvstore.NewKVStoreApplication()
    } else {
        app = kvstore.NewPersistentKVStoreApplication(flagPersist)
        app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
    }

    // Start the listener
    srv, err := server.NewServer(flagAddrD, flagAbci, app)
    if err != nil {
        return err
    }
    srv.SetLogger(logger.With("module", "abci-server"))
    if err := srv.Start(); err != nil {
        return err
    }

    // Wait forever
    cmn.TrapSignal(func() {
        // Cleanup
        srv.Stop()
    })
    return nil
}
```

@@ -28,8 +28,8 @@ type Client interface {
    EchoAsync(msg string) *ReqRes
    InfoAsync(types.RequestInfo) *ReqRes
    SetOptionAsync(types.RequestSetOption) *ReqRes
    DeliverTxAsync(types.RequestDeliverTx) *ReqRes
    CheckTxAsync(types.RequestCheckTx) *ReqRes
    DeliverTxAsync(tx []byte) *ReqRes
    CheckTxAsync(tx []byte) *ReqRes
    QueryAsync(types.RequestQuery) *ReqRes
    CommitAsync() *ReqRes
    InitChainAsync(types.RequestInitChain) *ReqRes
@@ -40,8 +40,8 @@ type Client interface {
    EchoSync(msg string) (*types.ResponseEcho, error)
    InfoSync(types.RequestInfo) (*types.ResponseInfo, error)
    SetOptionSync(types.RequestSetOption) (*types.ResponseSetOption, error)
    DeliverTxSync(types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
    CheckTxSync(types.RequestCheckTx) (*types.ResponseCheckTx, error)
    DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error)
    CheckTxSync(tx []byte) (*types.ResponseCheckTx, error)
    QuerySync(types.RequestQuery) (*types.ResponseQuery, error)
    CommitSync() (*types.ResponseCommit, error)
    InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error)
@@ -105,8 +105,8 @@ func (reqRes *ReqRes) SetCallback(cb func(res *types.Response)) {
        return
    }

    defer reqRes.mtx.Unlock()
    reqRes.cb = cb
    reqRes.mtx.Unlock()
}

func (reqRes *ReqRes) GetCallback() func(*types.Response) {

@@ -6,8 +6,8 @@ import (
    "sync"
    "time"

    "golang.org/x/net/context"
    "google.golang.org/grpc"
    context "golang.org/x/net/context"
    grpc "google.golang.org/grpc"

    "github.com/tendermint/tendermint/abci/types"
    cmn "github.com/tendermint/tendermint/libs/common"
@@ -39,7 +39,7 @@ func NewGRPCClient(addr string, mustConnect bool) *grpcClient {
    return cli
}

func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
    return cmn.Connect(addr)
}

@@ -49,12 +49,12 @@ func (cli *grpcClient) OnStart() error {
    }
RETRY_LOOP:
    for {
        conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
        conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
        if err != nil {
            if cli.mustConnect {
                return err
            }
            cli.Logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr), "err", err)
            cli.Logger.Error(fmt.Sprintf("abci.grpcClient failed to connect to %v. Retrying...\n", cli.addr))
            time.Sleep(time.Second * dialRetryIntervalSeconds)
            continue RETRY_LOOP
        }
@@ -65,7 +65,7 @@ RETRY_LOOP:

ENSURE_CONNECTED:
    for {
        _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
        _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.FailFast(true))
        if err == nil {
            break ENSURE_CONNECTED
        }
@@ -111,8 +111,8 @@ func (cli *grpcClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *grpcClient) SetResponseCallback(resCb Callback) {
    cli.mtx.Lock()
    defer cli.mtx.Unlock()
    cli.resCb = resCb
    cli.mtx.Unlock()
}

//----------------------------------------
@@ -125,101 +125,101 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {

func (cli *grpcClient) EchoAsync(msg string) *ReqRes {
    req := types.ToRequestEcho(msg)
    res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.WaitForReady(true))
    res, err := cli.client.Echo(context.Background(), req.GetEcho(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{Echo: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Echo{res}})
}

func (cli *grpcClient) FlushAsync() *ReqRes {
    req := types.ToRequestFlush()
    res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.WaitForReady(true))
    res, err := cli.client.Flush(context.Background(), req.GetFlush(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{Flush: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Flush{res}})
}

func (cli *grpcClient) InfoAsync(params types.RequestInfo) *ReqRes {
    req := types.ToRequestInfo(params)
    res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.WaitForReady(true))
    res, err := cli.client.Info(context.Background(), req.GetInfo(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{Info: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Info{res}})
}

func (cli *grpcClient) SetOptionAsync(params types.RequestSetOption) *ReqRes {
    req := types.ToRequestSetOption(params)
    res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.WaitForReady(true))
    res, err := cli.client.SetOption(context.Background(), req.GetSetOption(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{SetOption: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_SetOption{res}})
}

func (cli *grpcClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
    req := types.ToRequestDeliverTx(params)
    res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.WaitForReady(true))
func (cli *grpcClient) DeliverTxAsync(tx []byte) *ReqRes {
    req := types.ToRequestDeliverTx(tx)
    res, err := cli.client.DeliverTx(context.Background(), req.GetDeliverTx(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_DeliverTx{res}})
}

func (cli *grpcClient) CheckTxAsync(params types.RequestCheckTx) *ReqRes {
    req := types.ToRequestCheckTx(params)
    res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.WaitForReady(true))
func (cli *grpcClient) CheckTxAsync(tx []byte) *ReqRes {
    req := types.ToRequestCheckTx(tx)
    res, err := cli.client.CheckTx(context.Background(), req.GetCheckTx(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_CheckTx{res}})
}

func (cli *grpcClient) QueryAsync(params types.RequestQuery) *ReqRes {
    req := types.ToRequestQuery(params)
    res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.WaitForReady(true))
    res, err := cli.client.Query(context.Background(), req.GetQuery(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{Query: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Query{res}})
}

func (cli *grpcClient) CommitAsync() *ReqRes {
    req := types.ToRequestCommit()
    res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.WaitForReady(true))
    res, err := cli.client.Commit(context.Background(), req.GetCommit(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{Commit: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_Commit{res}})
}

func (cli *grpcClient) InitChainAsync(params types.RequestInitChain) *ReqRes {
    req := types.ToRequestInitChain(params)
    res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.WaitForReady(true))
    res, err := cli.client.InitChain(context.Background(), req.GetInitChain(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_InitChain{res}})
}

func (cli *grpcClient) BeginBlockAsync(params types.RequestBeginBlock) *ReqRes {
    req := types.ToRequestBeginBlock(params)
    res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.WaitForReady(true))
    res, err := cli.client.BeginBlock(context.Background(), req.GetBeginBlock(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_BeginBlock{res}})
}

func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes {
    req := types.ToRequestEndBlock(params)
    res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.WaitForReady(true))
    res, err := cli.client.EndBlock(context.Background(), req.GetEndBlock(), grpc.FailFast(true))
    if err != nil {
        cli.StopForError(err)
    }
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
    return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{res}})
}

func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes {
@@ -228,22 +228,18 @@ func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response)
    reqres.Done() // Release waiters
    reqres.SetDone() // so reqRes.SetCallback will run the callback

    // goroutine for callbacks
    // go routine for callbacks
    go func() {
        cli.mtx.Lock()
        defer cli.mtx.Unlock()
        // Notify reqRes listener if set
        if cb := reqres.GetCallback(); cb != nil {
            cb(res)
        }

        // Notify client listener if set
        if cli.resCb != nil {
            cli.resCb(reqres.Request, res)
        }

        // Notify reqRes listener if set
        if cb := reqres.GetCallback(); cb != nil {
            cb(res)
        }
    }()

    return reqres
}

@@ -269,13 +265,13 @@ func (cli *grpcClient) SetOptionSync(req types.RequestSetOption) (*types.Respons
    return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *grpcClient) DeliverTxSync(params types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
    reqres := cli.DeliverTxAsync(params)
func (cli *grpcClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
    reqres := cli.DeliverTxAsync(tx)
    return reqres.Response.GetDeliverTx(), cli.Error()
}

func (cli *grpcClient) CheckTxSync(params types.RequestCheckTx) (*types.ResponseCheckTx, error) {
    reqres := cli.CheckTxAsync(params)
func (cli *grpcClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
    reqres := cli.CheckTxAsync(tx)
    return reqres.Response.GetCheckTx(), cli.Error()
}

@@ -9,13 +9,8 @@ import (

var _ Client = (*localClient)(nil)

// NOTE: use defer to unlock mutex because Application might panic (e.g., in
// case of malicious tx or query). It only makes sense for publicly exposed
// methods like CheckTx (/broadcast_tx_* RPC endpoint) or Query (/abci_query
// RPC endpoint), but defers are used everywhere for the sake of consistency.
type localClient struct {
    cmn.BaseService

    mtx *sync.Mutex
    types.Application
    Callback
@@ -35,8 +30,8 @@ func NewLocalClient(mtx *sync.Mutex, app types.Application) *localClient {

func (app *localClient) SetResponseCallback(cb Callback) {
    app.mtx.Lock()
    defer app.mtx.Unlock()
    app.Callback = cb
    app.mtx.Unlock()
}

// TODO: change types.Application to include Error()?
@@ -50,9 +45,6 @@ func (app *localClient) FlushAsync() *ReqRes {
}

func (app *localClient) EchoAsync(msg string) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    return app.callback(
        types.ToRequestEcho(msg),
        types.ToResponseEcho(msg),
@@ -61,9 +53,8 @@ func (app *localClient) EchoAsync(msg string) *ReqRes {

func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.Info(req)
    app.mtx.Unlock()
    return app.callback(
        types.ToRequestInfo(req),
        types.ToResponseInfo(res),
@@ -72,42 +63,38 @@ func (app *localClient) InfoAsync(req types.RequestInfo) *ReqRes {

func (app *localClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.SetOption(req)
    app.mtx.Unlock()
    return app.callback(
        types.ToRequestSetOption(req),
        types.ToResponseSetOption(res),
    )
}

func (app *localClient) DeliverTxAsync(params types.RequestDeliverTx) *ReqRes {
func (app *localClient) DeliverTxAsync(tx []byte) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.DeliverTx(params)
    res := app.Application.DeliverTx(tx)
    app.mtx.Unlock()
    return app.callback(
        types.ToRequestDeliverTx(params),
        types.ToRequestDeliverTx(tx),
        types.ToResponseDeliverTx(res),
    )
}

func (app *localClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
func (app *localClient) CheckTxAsync(tx []byte) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.CheckTx(req)
    res := app.Application.CheckTx(tx)
    app.mtx.Unlock()
    return app.callback(
        types.ToRequestCheckTx(req),
        types.ToRequestCheckTx(tx),
        types.ToResponseCheckTx(res),
    )
}

func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.Query(req)
    app.mtx.Unlock()
    return app.callback(
        types.ToRequestQuery(req),
        types.ToResponseQuery(res),
@@ -116,9 +103,8 @@ func (app *localClient) QueryAsync(req types.RequestQuery) *ReqRes {

func (app *localClient) CommitAsync() *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.Commit()
    app.mtx.Unlock()
    return app.callback(
        types.ToRequestCommit(),
        types.ToResponseCommit(res),
@@ -127,20 +113,19 @@ func (app *localClient) CommitAsync() *ReqRes {

func (app *localClient) InitChainAsync(req types.RequestInitChain) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.InitChain(req)
    return app.callback(
    reqRes := app.callback(
        types.ToRequestInitChain(req),
        types.ToResponseInitChain(res),
    )
    app.mtx.Unlock()
    return reqRes
}

func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.BeginBlock(req)
    app.mtx.Unlock()
    return app.callback(
        types.ToRequestBeginBlock(req),
        types.ToResponseBeginBlock(res),
@@ -149,9 +134,8 @@ func (app *localClient) BeginBlockAsync(req types.RequestBeginBlock) *ReqRes {

func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.EndBlock(req)
    app.mtx.Unlock()
    return app.callback(
        types.ToRequestEndBlock(req),
        types.ToResponseEndBlock(res),
@@ -170,73 +154,64 @@ func (app *localClient) EchoSync(msg string) (*types.ResponseEcho, error) {

func (app *localClient) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.Info(req)
    app.mtx.Unlock()
    return &res, nil
}

func (app *localClient) SetOptionSync(req types.RequestSetOption) (*types.ResponseSetOption, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.SetOption(req)
    app.mtx.Unlock()
    return &res, nil
}

func (app *localClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
func (app *localClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.DeliverTx(req)
    res := app.Application.DeliverTx(tx)
    app.mtx.Unlock()
    return &res, nil
}

func (app *localClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
func (app *localClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.CheckTx(req)
    res := app.Application.CheckTx(tx)
    app.mtx.Unlock()
    return &res, nil
}

func (app *localClient) QuerySync(req types.RequestQuery) (*types.ResponseQuery, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.Query(req)
    app.mtx.Unlock()
    return &res, nil
}

func (app *localClient) CommitSync() (*types.ResponseCommit, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.Commit()
    app.mtx.Unlock()
    return &res, nil
}

func (app *localClient) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.InitChain(req)
    app.mtx.Unlock()
    return &res, nil
}

func (app *localClient) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.BeginBlock(req)
    app.mtx.Unlock()
    return &res, nil
}

func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) {
    app.mtx.Lock()
    defer app.mtx.Unlock()

    res := app.Application.EndBlock(req)
    app.mtx.Unlock()
    return &res, nil
}

@@ -26,17 +26,16 @@ var _ Client = (*socketClient)(nil)
type socketClient struct {
    cmn.BaseService

    addr        string
    reqQueue    chan *ReqRes
    flushTimer  *cmn.ThrottleTimer
    mustConnect bool
    conn        net.Conn

    reqQueue   chan *ReqRes
    flushTimer *cmn.ThrottleTimer

    mtx     sync.Mutex
    addr    string
    conn    net.Conn
    err     error
    reqSent *list.List // list of requests sent, waiting for response
    resCb   func(*types.Request, *types.Response) // called on all requests, if set.
    reqSent *list.List
    resCb   func(*types.Request, *types.Response) // listens to all callbacks

}

@@ -55,6 +54,10 @@ func NewSocketClient(addr string, mustConnect bool) *socketClient {
}

func (cli *socketClient) OnStart() error {
    if err := cli.BaseService.OnStart(); err != nil {
        return err
    }

    var err error
    var conn net.Conn
RETRY_LOOP:
@@ -64,7 +67,7 @@ RETRY_LOOP:
            if cli.mustConnect {
                return err
            }
            cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying...", cli.addr), "err", err)
            cli.Logger.Error(fmt.Sprintf("abci.socketClient failed to connect to %v. Retrying...", cli.addr))
            time.Sleep(time.Second * dialRetryIntervalSeconds)
            continue RETRY_LOOP
        }
@@ -78,12 +81,14 @@ RETRY_LOOP:
}

func (cli *socketClient) OnStop() {
    cli.BaseService.OnStop()

    cli.mtx.Lock()
    defer cli.mtx.Unlock()
    if cli.conn != nil {
        cli.conn.Close()
    }

    cli.mtx.Lock()
    defer cli.mtx.Unlock()
    cli.flushQueue()
}

@@ -113,8 +118,8 @@ func (cli *socketClient) Error() error {
// NOTE: callback may get internally generated flush responses.
func (cli *socketClient) SetResponseCallback(resCb Callback) {
    cli.mtx.Lock()
    defer cli.mtx.Unlock()
    cli.resCb = resCb
    cli.mtx.Unlock()
}

//----------------------------------------
@@ -202,18 +207,16 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
    reqres.Done() // Release waiters
    cli.reqSent.Remove(next) // Pop first item from linked list

    // Notify client listener if set (global callback).
    if cli.resCb != nil {
        cli.resCb(reqres.Request, res)
    }

    // Notify reqRes listener if set (request specific callback).
    // NOTE: it is possible this callback isn't set on the reqres object.
    // at this point, in which case it will be called after, when it is set.
    // Notify reqRes listener if set
    if cb := reqres.GetCallback(); cb != nil {
        cb(res)
    }

    // Notify client listener if set
    if cli.resCb != nil {
        cli.resCb(reqres.Request, res)
    }

    return nil
}

@@ -235,12 +238,12 @@ func (cli *socketClient) SetOptionAsync(req types.RequestSetOption) *ReqRes {
    return cli.queueRequest(types.ToRequestSetOption(req))
}

func (cli *socketClient) DeliverTxAsync(req types.RequestDeliverTx) *ReqRes {
    return cli.queueRequest(types.ToRequestDeliverTx(req))
func (cli *socketClient) DeliverTxAsync(tx []byte) *ReqRes {
    return cli.queueRequest(types.ToRequestDeliverTx(tx))
}

func (cli *socketClient) CheckTxAsync(req types.RequestCheckTx) *ReqRes {
    return cli.queueRequest(types.ToRequestCheckTx(req))
func (cli *socketClient) CheckTxAsync(tx []byte) *ReqRes {
    return cli.queueRequest(types.ToRequestCheckTx(tx))
}

func (cli *socketClient) QueryAsync(req types.RequestQuery) *ReqRes {
@@ -292,14 +295,14 @@ func (cli *socketClient) SetOptionSync(req types.RequestSetOption) (*types.Respo
    return reqres.Response.GetSetOption(), cli.Error()
}

func (cli *socketClient) DeliverTxSync(req types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
    reqres := cli.queueRequest(types.ToRequestDeliverTx(req))
func (cli *socketClient) DeliverTxSync(tx []byte) (*types.ResponseDeliverTx, error) {
    reqres := cli.queueRequest(types.ToRequestDeliverTx(tx))
    cli.FlushSync()
    return reqres.Response.GetDeliverTx(), cli.Error()
}

func (cli *socketClient) CheckTxSync(req types.RequestCheckTx) (*types.ResponseCheckTx, error) {
    reqres := cli.queueRequest(types.ToRequestCheckTx(req))
func (cli *socketClient) CheckTxSync(tx []byte) (*types.ResponseCheckTx, error) {
    reqres := cli.queueRequest(types.ToRequestCheckTx(tx))
    cli.FlushSync()
    return reqres.Response.GetCheckTx(), cli.Error()
}

@@ -58,7 +58,7 @@ var RootCmd = &cobra.Command{
    PersistentPreRunE: func(cmd *cobra.Command, args []string) error {

        switch cmd.Use {
        case "counter", "kvstore": // for the examples apps, don't pre-run
        case "counter", "kvstore", "dummy": // for the examples apps, don't pre-run
            return nil
        case "version": // skip running for version command
            return nil
@@ -127,6 +127,10 @@ func addCounterFlags() {
    counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}

func addDummyFlags() {
    dummyCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}

func addKVStoreFlags() {
    kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
@@ -148,6 +152,10 @@ func addCommands() {
    // examples
    addCounterFlags()
    RootCmd.AddCommand(counterCmd)
    // deprecated, left for backwards compatibility
    addDummyFlags()
    RootCmd.AddCommand(dummyCmd)
    // replaces dummy, see issue #196
    addKVStoreFlags()
    RootCmd.AddCommand(kvstoreCmd)
}
@@ -174,7 +182,9 @@ where example.file looks something like:
info
`,
    Args: cobra.ExactArgs(0),
    RunE: cmdBatch,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdBatch(cmd, args)
    },
}

var consoleCmd = &cobra.Command{
@@ -187,7 +197,9 @@ without opening a new connection each time
`,
    Args:      cobra.ExactArgs(0),
    ValidArgs: []string{"echo", "info", "set_option", "deliver_tx", "check_tx", "commit", "query"},
    RunE: cmdConsole,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdConsole(cmd, args)
    },
}

var echoCmd = &cobra.Command{
@@ -195,21 +207,27 @@ var echoCmd = &cobra.Command{
    Short: "have the application echo a message",
    Long:  "have the application echo a message",
    Args:  cobra.ExactArgs(1),
    RunE: cmdEcho,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdEcho(cmd, args)
    },
}
var infoCmd = &cobra.Command{
    Use:   "info",
    Short: "get some info about the application",
    Long:  "get some info about the application",
    Args:  cobra.ExactArgs(0),
    RunE: cmdInfo,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdInfo(cmd, args)
    },
}
var setOptionCmd = &cobra.Command{
    Use:   "set_option",
    Short: "set an option on the application",
    Long:  "set an option on the application",
    Args:  cobra.ExactArgs(2),
    RunE: cmdSetOption,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdSetOption(cmd, args)
    },
}

var deliverTxCmd = &cobra.Command{
@@ -217,7 +235,9 @@ var deliverTxCmd = &cobra.Command{
    Short: "deliver a new transaction to the application",
    Long:  "deliver a new transaction to the application",
    Args:  cobra.ExactArgs(1),
    RunE: cmdDeliverTx,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdDeliverTx(cmd, args)
    },
}

var checkTxCmd = &cobra.Command{
@@ -225,7 +245,9 @@ var checkTxCmd = &cobra.Command{
    Short: "validate a transaction",
    Long:  "validate a transaction",
    Args:  cobra.ExactArgs(1),
    RunE: cmdCheckTx,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdCheckTx(cmd, args)
    },
}

var commitCmd = &cobra.Command{
@@ -233,7 +255,9 @@ var commitCmd = &cobra.Command{
    Short: "commit the application state and return the Merkle root hash",
    Long:  "commit the application state and return the Merkle root hash",
    Args:  cobra.ExactArgs(0),
    RunE: cmdCommit,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdCommit(cmd, args)
    },
}

var versionCmd = &cobra.Command{
@@ -252,7 +276,9 @@ var queryCmd = &cobra.Command{
    Short: "query the application state",
    Long:  "query the application state",
    Args:  cobra.ExactArgs(1),
    RunE: cmdQuery,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdQuery(cmd, args)
    },
}

var counterCmd = &cobra.Command{
@@ -260,7 +286,21 @@ var counterCmd = &cobra.Command{
    Short: "ABCI demo example",
    Long:  "ABCI demo example",
    Args:  cobra.ExactArgs(0),
    RunE: cmdCounter,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdCounter(cmd, args)
    },
}

// deprecated, left for backwards compatibility
var dummyCmd = &cobra.Command{
    Use:        "dummy",
    Deprecated: "use: [abci-cli kvstore] instead",
    Short:      "ABCI demo example",
    Long:       "ABCI demo example",
    Args:       cobra.ExactArgs(0),
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdKVStore(cmd, args)
    },
}

var kvstoreCmd = &cobra.Command{
@@ -268,7 +308,9 @@ var kvstoreCmd = &cobra.Command{
    Short: "ABCI demo example",
    Long:  "ABCI demo example",
    Args:  cobra.ExactArgs(0),
    RunE: cmdKVStore,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdKVStore(cmd, args)
    },
}

var testCmd = &cobra.Command{
@@ -276,7 +318,9 @@ var testCmd = &cobra.Command{
    Short: "run integration tests",
    Long:  "run integration tests",
    Args:  cobra.ExactArgs(0),
    RunE: cmdTest,
    RunE: func(cmd *cobra.Command, args []string) error {
        return cmdTest(cmd, args)
    },
}

// Generates new Args array based off of previous call args to maintain flag persistence
@@ -332,18 +376,16 @@ func cmdTest(cmd *cobra.Command, args []string) error {

func cmdBatch(cmd *cobra.Command, args []string) error {
    bufReader := bufio.NewReader(os.Stdin)
LOOP:
    for {

        line, more, err := bufReader.ReadLine()
        switch {
        case more:
        if more {
            return errors.New("Input line is too long")
        case err == io.EOF:
            break LOOP
        case len(line) == 0:
        } else if err == io.EOF {
            break
        } else if len(line) == 0 {
            continue
        case err != nil:
        } else if err != nil {
            return err
        }

@@ -372,6 +414,7 @@ func cmdConsole(cmd *cobra.Command, args []string) error {
            return err
        }
    }
    return nil
}

func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
@@ -397,7 +440,7 @@ func muxOnCommands(cmd *cobra.Command, pArgs []string) error {
        }

        // otherwise, we need to skip the next one too
        i++
        i += 1
        continue
    }

@@ -524,7 +567,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
    if err != nil {
        return err
    }
    res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
    res, err := client.DeliverTxSync(txBytes)
    if err != nil {
        return err
    }
@@ -550,7 +593,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
    if err != nil {
        return err
    }
    res, err := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
    res, err := client.CheckTxSync(txBytes)
    if err != nil {
        return err
    }
@@ -614,7 +657,9 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
}

func cmdCounter(cmd *cobra.Command, args []string) error {

    app := counter.NewCounterApplication(flagSerial)

    logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))

    // Start the listener
@@ -627,14 +672,12 @@ func cmdCounter(cmd *cobra.Command, args []string) error {
        return err
    }

    // Stop upon receiving SIGTERM or CTRL-C.
    cmn.TrapSignal(logger, func() {
    // Wait forever
    cmn.TrapSignal(func() {
        // Cleanup
        srv.Stop()
    })

    // Run forever.
    select {}
    return nil
}

func cmdKVStore(cmd *cobra.Command, args []string) error {
@@ -659,14 +702,12 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
        return err
    }

    // Stop upon receiving SIGTERM or CTRL-C.
    cmn.TrapSignal(logger, func() {
    // Wait forever
    cmn.TrapSignal(func() {
        // Cleanup
        srv.Stop()
    })

    // Run forever.
    select {}
    return nil
}

//--------------------------------------------------------------------------------

@@ -42,15 +42,15 @@ func (app *CounterApplication) SetOption(req types.RequestSetOption) types.Respo
return types.ResponseSetOption{}
}

func (app *CounterApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
if app.serial {
if len(req.Tx) > 8 {
if len(tx) > 8 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(app.txCount) {
return types.ResponseDeliverTx{
@@ -62,15 +62,15 @@ func (app *CounterApplication) DeliverTx(req types.RequestDeliverTx) types.Respo
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}

func (app *CounterApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx {
if app.serial {
if len(req.Tx) > 8 {
if len(tx) > 8 {
return types.ResponseCheckTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
copy(tx8[len(tx8)-len(tx):], tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue < uint64(app.txCount) {
return types.ResponseCheckTx{

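The counter app above right-aligns a tx of at most 8 bytes into an 8-byte buffer and decodes it big-endian. A small self-contained sketch of that encoding; the helper names are ours, not from the diff:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// encodeCounterTx writes the value as an 8-byte big-endian integer,
// the canonical form the counter app decodes. (Helper name is ours.)
func encodeCounterTx(value uint64) []byte {
	tx8 := make([]byte, 8)
	binary.BigEndian.PutUint64(tx8, value)
	return tx8
}

// decodeCounterTx mirrors the app: right-align a shorter tx into an
// 8-byte buffer, then read it big-endian.
func decodeCounterTx(tx []byte) (uint64, error) {
	if len(tx) > 8 {
		return 0, fmt.Errorf("max tx size is 8 bytes, got %d", len(tx))
	}
	tx8 := make([]byte, 8)
	copy(tx8[len(tx8)-len(tx):], tx)
	return binary.BigEndian.Uint64(tx8), nil
}

func main() {
	tx := encodeCounterTx(42)
	v, err := decodeCounterTx(tx[6:]) // the 2-byte tail decodes to the same value
	fmt.Println(v, err)               // 42 <nil>
}
```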
@@ -87,7 +87,7 @@ func testStream(t *testing.T, app types.Application) {
// Write requests
for counter := 0; counter < numDeliverTxs; counter++ {
// Send request
reqRes := client.DeliverTxAsync(types.RequestDeliverTx{Tx: []byte("test")})
reqRes := client.DeliverTxAsync([]byte("test"))
_ = reqRes
// check err ?

@@ -107,7 +107,7 @@ func testStream(t *testing.T, app types.Application) {
//-------------------------
// test grpc

func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) {
return cmn.Connect(addr)
}

@@ -123,7 +123,7 @@ func testGRPCSync(t *testing.T, app *types.GRPCApplication) {
defer server.Stop()

// Connect to the socket
conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
conn, err := grpc.Dial("unix://test.sock", grpc.WithInsecure(), grpc.WithDialer(dialerFunc))
if err != nil {
t.Fatalf("Error dialing GRPC server: %v", err.Error())
}
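The dialer change above tracks the gRPC API: the older `grpc.WithDialer` took `(addr string, timeout time.Duration)`, while `grpc.WithContextDialer` takes a context-aware function. A hedged sketch of the newer form, assuming a unix-socket target:

```go
package main

import (
	"context"
	"net"

	"google.golang.org/grpc"
)

// dialerFunc matches the signature grpc.WithContextDialer expects.
// We assume the dial target names a unix socket.
func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
	var d net.Dialer
	return d.DialContext(ctx, "unix", addr)
}

func main() {
	conn, err := grpc.Dial("unix://test.sock",
		grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
	if err != nil {
		panic(err)
	}
	defer conn.Close()
}
```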
@@ -22,10 +22,10 @@ and the Handshake allows any necessary blocks to be replayed.
Validator set changes are effected using the following transaction format:

```
"val:pubkey1!power1,pubkey2!power2,pubkey3!power3"
val:pubkey1/power1,addr2/power2,addr3/power3"
```

where `pubkeyN` is a base64-encoded 32-byte ed25519 key and `powerN` is a new voting power for the validator with `pubkeyN` (possibly a new one).
To remove a validator from the validator set, set power to `0`.
where `power1` is the new voting power for the validator with `pubkey1` (possibly a new one).
There is no sybil protection against new validators joining.
Validators can be removed by setting their power to `0`.

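A short sketch of building such a transaction, mirroring the `val:pubkey!power` form with a base64 pubkey described above; the placeholder key bytes are not a real validator key:

```go
package main

import (
	"encoding/base64"
	"fmt"
)

// makeValSetChangeTx builds the "val:pubkey!power" form described above,
// with the pubkey base64-encoded (the hex "/" form is the older variant).
func makeValSetChangeTx(pubkey []byte, power int64) []byte {
	pubStr := base64.StdEncoding.EncodeToString(pubkey)
	return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
}

func main() {
	key := make([]byte, 32) // stands in for a 32-byte ed25519 public key
	fmt.Println(string(makeValSetChangeTx(key, 10))) // add/update with power 10
	fmt.Println(string(makeValSetChangeTx(key, 0)))  // power 0 removes the validator
}
```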
@@ -9,8 +9,8 @@ import (
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/version"
dbm "github.com/tendermint/tm-db"
)

var (
@@ -76,32 +76,25 @@ func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.Respon
}

// tx is either "key=value" or just arbitrary bytes
func (app *KVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
var key, value []byte
parts := bytes.Split(req.Tx, []byte("="))
parts := bytes.Split(tx, []byte("="))
if len(parts) == 2 {
key, value = parts[0], parts[1]
} else {
key, value = req.Tx, req.Tx
key, value = tx, tx
}

app.state.db.Set(prefixKey(key), value)
app.state.Size += 1

events := []types.Event{
{
Type: "app",
Attributes: []cmn.KVPair{
{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko")},
{Key: []byte("key"), Value: key},
},
},
tags := []cmn.KVPair{
{Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")},
{Key: []byte("app.key"), Value: key},
}

return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
}

func (app *KVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
func (app *KVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
}

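A minimal sketch of the parsing rule stated in the comment above, factored into a hypothetical `parseTx` helper:

```go
package main

import (
	"bytes"
	"fmt"
)

// parseTx implements the rule above: a tx is either "key=value", or
// arbitrary bytes that serve as both key and value.
func parseTx(tx []byte) (key, value []byte) {
	parts := bytes.Split(tx, []byte("="))
	if len(parts) == 2 {
		return parts[0], parts[1]
	}
	return tx, tx
}

func main() {
	k, v := parseTx([]byte("name=satoshi"))
	fmt.Printf("%s -> %s\n", k, v) // name -> satoshi
	k, v = parseTx([]byte("ping"))
	fmt.Printf("%s -> %s\n", k, v) // ping -> ping
}
```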
@@ -115,7 +108,6 @@ func (app *KVStoreApplication) Commit() types.ResponseCommit {
return types.ResponseCommit{Data: appHash}
}

// Returns an associated value or nil if missing.
func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
if reqQuery.Prove {
value := app.state.db.Get(prefixKey(reqQuery.Data))

@@ -18,17 +18,11 @@ import (
"github.com/tendermint/tendermint/abci/types"
)

const (
testKey = "abc"
testValue = "def"
)

func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
req := types.RequestDeliverTx{Tx: tx}
ar := app.DeliverTx(req)
ar := app.DeliverTx(tx)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar = app.DeliverTx(req)
ar = app.DeliverTx(tx)
require.False(t, ar.IsErr(), ar)

// make sure query is fine
@@ -51,12 +45,12 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri

func TestKVStoreKV(t *testing.T) {
kvstore := NewKVStoreApplication()
key := testKey
key := "abc"
value := key
tx := []byte(key)
testKVStore(t, kvstore, tx, key, value)

value = testValue
value = "def"
tx = []byte(key + "=" + value)
testKVStore(t, kvstore, tx, key, value)
}
@@ -67,12 +61,12 @@ func TestPersistentKVStoreKV(t *testing.T) {
t.Fatal(err)
}
kvstore := NewPersistentKVStoreApplication(dir)
key := testKey
key := "abc"
value := key
tx := []byte(key)
testKVStore(t, kvstore, tx, key, value)

value = testValue
value = "def"
tx = []byte(key + "=" + value)
testKVStore(t, kvstore, tx, key, value)
}
@@ -95,7 +89,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
height = int64(1)
hash := []byte("foo")
header := types.Header{
Height: height,
Height: int64(height),
}
kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
@@ -153,7 +147,7 @@ func TestValUpdates(t *testing.T) {

makeApplyBlock(t, kvstore, 2, diff, tx1, tx2, tx3)

vals1 = append(vals[:nInit-2], vals[nInit+1]) // nolint: gocritic
vals1 = append(vals[:nInit-2], vals[nInit+1])
vals2 = kvstore.Validators()
valsEqual(t, vals1, vals2)

@@ -185,7 +179,7 @@ func makeApplyBlock(t *testing.T, kvstore types.Application, heightInt int, diff

kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
for _, tx := range txs {
if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
if r := kvstore.DeliverTx(tx); r.IsErr() {
t.Fatal(r)
}
}
@@ -277,22 +271,22 @@ func TestClientServer(t *testing.T) {

func runClientTests(t *testing.T, client abcicli.Client) {
// run some tests....
key := testKey
key := "abc"
value := key
tx := []byte(key)
testClient(t, client, tx, key, value)

value = testValue
value = "def"
tx = []byte(key + "=" + value)
testClient(t, client, tx, key, value)
}

func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
ar, err := app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
ar, err := app.DeliverTxSync(tx)
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)
// repeating tx doesn't raise error
ar, err = app.DeliverTxSync(types.RequestDeliverTx{Tx: tx})
ar, err = app.DeliverTxSync(tx)
require.NoError(t, err)
require.False(t, ar.IsErr(), ar)

@@ -2,17 +2,15 @@ package kvstore

import (
"bytes"
"encoding/base64"
"encoding/hex"
"fmt"
"strconv"
"strings"

"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/ed25519"
dbm "github.com/tendermint/tendermint/libs/db"
"github.com/tendermint/tendermint/libs/log"
tmtypes "github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)

const (
@@ -29,8 +27,6 @@ type PersistentKVStoreApplication struct {
// validator set
ValUpdates []types.ValidatorUpdate

valAddrToPubKeyMap map[string]types.PubKey

logger log.Logger
}

@@ -44,9 +40,8 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication
state := loadState(db)

return &PersistentKVStoreApplication{
app: &KVStoreApplication{state: state},
valAddrToPubKeyMap: make(map[string]types.PubKey),
logger: log.NewNopLogger(),
app: &KVStoreApplication{state: state},
logger: log.NewNopLogger(),
}
}

@@ -65,22 +60,22 @@ func (app *PersistentKVStoreApplication) SetOption(req types.RequestSetOption) t
return app.app.SetOption(req)
}

// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
// tx is either "val:pubkey/power" or "key=value" or just arbitrary bytes
func (app *PersistentKVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
// if it starts with "val:", update the validator set
// format is "val:pubkey!power"
if isValidatorTx(req.Tx) {
// format is "val:pubkey/power"
if isValidatorTx(tx) {
// update validators in the merkle tree
// and in app.ValUpdates
return app.execValidatorTx(req.Tx)
return app.execValidatorTx(tx)
}

// otherwise, update the key-value store
return app.app.DeliverTx(req)
return app.app.DeliverTx(tx)
}

func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
return app.app.CheckTx(req)
func (app *PersistentKVStoreApplication) CheckTx(tx []byte) types.ResponseCheckTx {
return app.app.CheckTx(tx)
}

// Commit will panic if InitChain was not called
@@ -88,20 +83,8 @@ func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
return app.app.Commit()
}

// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
// For any other path, returns an associated value or nil if missing.
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
switch reqQuery.Path {
case "/val":
key := []byte("val:" + string(reqQuery.Data))
value := app.app.state.db.Get(key)

resQuery.Key = reqQuery.Data
resQuery.Value = value
return
default:
return app.app.Query(reqQuery)
}
func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) types.ResponseQuery {
return app.app.Query(reqQuery)
}

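For the removed `/val` branch above, a hedged sketch of how a caller would exercise it, assuming an Application whose Query still implements that path:

```go
package example

import "github.com/tendermint/tendermint/abci/types"

// queryValidator shows how the "/val" branch above would be used: the
// validator address goes in Data, and the varint-encoded validator
// update comes back in Value. app is any Application with that branch;
// the helper name is ours, not from the diff.
func queryValidator(app types.Application, addr []byte) []byte {
	res := app.Query(types.RequestQuery{Path: "/val", Data: addr})
	return res.Value
}
```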
// Save the validators in the merkle tree
@@ -119,19 +102,6 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
// reset valset changes
app.ValUpdates = make([]types.ValidatorUpdate, 0)

for _, ev := range req.ByzantineValidators {
if ev.Type == tmtypes.ABCIEvidenceTypeDuplicateVote {
// decrease voting power by 1
if ev.TotalVotingPower == 0 {
continue
}
app.updateValidator(types.ValidatorUpdate{
PubKey: app.valAddrToPubKeyMap[string(ev.Validator.Address)],
Power: ev.TotalVotingPower - 1,
})
}
}
return types.ResponseBeginBlock{}
}

@@ -159,34 +129,33 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida
}

func MakeValSetChangeTx(pubkey types.PubKey, power int64) []byte {
pubStr := base64.StdEncoding.EncodeToString(pubkey.Data)
return []byte(fmt.Sprintf("val:%s!%d", pubStr, power))
return []byte(fmt.Sprintf("val:%X/%d", pubkey.Data, power))
}

func isValidatorTx(tx []byte) bool {
return strings.HasPrefix(string(tx), ValidatorSetChangePrefix)
}

// format is "val:pubkey!power"
// pubkey is a base64-encoded 32-byte ed25519 key
// format is "val:pubkey/power"
// pubkey is raw 32-byte ed25519 key
func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
tx = tx[len(ValidatorSetChangePrefix):]

//get the pubkey and power
pubKeyAndPower := strings.Split(string(tx), "!")
pubKeyAndPower := strings.Split(string(tx), "/")
if len(pubKeyAndPower) != 2 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
Log: fmt.Sprintf("Expected 'pubkey/power'. Got %v", pubKeyAndPower)}
}
pubkeyS, powerS := pubKeyAndPower[0], pubKeyAndPower[1]

// decode the pubkey
pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
pubkey, err := hex.DecodeString(pubkeyS)
if err != nil {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
Log: fmt.Sprintf("Pubkey (%s) is invalid hex", pubkeyS)}
}

// decode the power
@@ -198,26 +167,20 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
}

// update
return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, power))
return app.updateValidator(types.Ed25519ValidatorUpdate(pubkey, int64(power)))
}

// add, update, or remove a validator
func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
key := []byte("val:" + string(v.PubKey.Data))

pubkey := ed25519.PubKeyEd25519{}
copy(pubkey[:], v.PubKey.Data)

if v.Power == 0 {
// remove validator
if !app.app.state.db.Has(key) {
pubStr := base64.StdEncoding.EncodeToString(v.PubKey.Data)
return types.ResponseDeliverTx{
Code: code.CodeTypeUnauthorized,
Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
Log: fmt.Sprintf("Cannot remove non-existent validator %X", key)}
}
app.app.state.db.Delete(key)
delete(app.valAddrToPubKeyMap, string(pubkey.Address()))
} else {
// add or update validator
value := bytes.NewBuffer(make([]byte, 0))
@@ -227,7 +190,6 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
Log: fmt.Sprintf("Error encoding validator: %v", err)}
}
app.app.state.db.Set(key, value.Bytes())
app.valAddrToPubKeyMap[string(pubkey.Address())] = v.PubKey
}

// we only update the changes array if we successfully updated the tree

@@ -127,12 +127,11 @@ func (s *SocketServer) acceptConnectionsRoutine() {

func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
err := <-closeConn
switch {
case err == io.EOF:
if err == io.EOF {
s.Logger.Error("Connection was closed by client")
case err != nil:
} else if err != nil {
s.Logger.Error("Connection error", "error", err)
default:
} else {
// never happens
s.Logger.Error("Connection was closed.")
}
@@ -147,16 +146,6 @@ func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, responses chan<- *types.Response) {
var count int
var bufReader = bufio.NewReader(conn)

defer func() {
// make sure to recover from any app-related panics to allow proper socket cleanup
r := recover()
if r != nil {
closeConn <- fmt.Errorf("recovered from panic: %v", r)
s.appMtx.Unlock()
}
}()

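A tiny self-contained sketch of the recovery pattern in the deferred function above: an application panic becomes an error on `closeConn` instead of crashing the process.

```go
package main

import "fmt"

// serve shows the pattern used above: a deferred recover turns a panic
// into an error on closeConn so the accept loop can clean up the socket.
func serve(closeConn chan error) {
	defer func() {
		if r := recover(); r != nil {
			closeConn <- fmt.Errorf("recovered from panic: %v", r)
		}
	}()
	panic("app blew up") // stands in for a panicking ABCI handler
}

func main() {
	closeConn := make(chan error, 1) // buffered so the deferred send can't block
	serve(closeConn)
	fmt.Println(<-closeConn) // recovered from panic: app blew up
}
```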
for {

var req = &types.Request{}
@@ -165,7 +154,7 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn net.Conn, respo
if err == io.EOF {
closeConn <- err
} else {
closeConn <- fmt.Errorf("error reading message: %v", err)
closeConn <- fmt.Errorf("Error reading message: %v", err.Error())
}
return
}
@@ -189,10 +178,10 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
res := s.app.SetOption(*r.SetOption)
responses <- types.ToResponseSetOption(res)
case *types.Request_DeliverTx:
res := s.app.DeliverTx(*r.DeliverTx)
res := s.app.DeliverTx(r.DeliverTx.Tx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
res := s.app.CheckTx(*r.CheckTx)
res := s.app.CheckTx(r.CheckTx.Tx)
responses <- types.ToResponseCheckTx(res)
case *types.Request_Commit:
res := s.app.Commit()

@@ -58,7 +58,7 @@ func Commit(client abcicli.Client, hashExp []byte) error {
}

func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
res, _ := client.DeliverTxSync(txBytes)
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
@@ -77,7 +77,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
}

func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTxSync(types.RequestCheckTx{Tx: txBytes})
res, _ := client.CheckTxSync(txBytes)
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: CheckTx")

@@ -43,7 +43,7 @@ func commit(client abcicli.Client, hashExp []byte) {
}

func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.DeliverTxSync(types.RequestDeliverTx{Tx: txBytes})
res, err := client.DeliverTxSync(txBytes)
if err != nil {
panicf("client error: %v", err)
}

@@ -4,7 +4,6 @@ set -e
# These tests spawn the counter app and server by execing the ABCI_APP command and run some simple client tests against it

# Get the directory of where this script is.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
@@ -14,13 +13,13 @@ cd "$DIR"

echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
ABCI_APP="counter" go run ./*.go
echo "----------------------"


echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run ./*.go
echo "----------------------"

# test nodejs counter

@@ -2,7 +2,6 @@
set -e

# Get the root directory.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )"

@@ -15,12 +15,12 @@ type Application interface {
Query(RequestQuery) ResponseQuery // Query for state

// Mempool Connection
CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool
CheckTx(tx []byte) ResponseCheckTx // Validate a tx for the mempool

// Consensus Connection
InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain with validators and other info from TendermintCore
BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing
DeliverTx(tx []byte) ResponseDeliverTx // Deliver a tx for full processing
EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set
Commit() ResponseCommit // Commit the state and return the application Merkle root hash
}
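A minimal sketch of implementing this interface by embedding `BaseApplication` and overriding a single method, written against the Request-struct signatures shown on the removed lines; the `NoopApp` name is ours:

```go
package example

import "github.com/tendermint/tendermint/abci/types"

// NoopApp embeds BaseApplication, which supplies no-op defaults for the
// whole Application interface, and overrides only DeliverTx.
type NoopApp struct {
	types.BaseApplication
}

func (app *NoopApp) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
	return types.ResponseDeliverTx{Code: 0} // 0 is CodeTypeOK
}
```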
@@ -45,11 +45,11 @@ func (BaseApplication) SetOption(req RequestSetOption) ResponseSetOption {
return ResponseSetOption{}
}

func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
func (BaseApplication) DeliverTx(tx []byte) ResponseDeliverTx {
return ResponseDeliverTx{Code: CodeTypeOK}
}

func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
func (BaseApplication) CheckTx(tx []byte) ResponseCheckTx {
return ResponseCheckTx{Code: CodeTypeOK}
}

@@ -103,12 +103,12 @@ func (app *GRPCApplication) SetOption(ctx context.Context, req *RequestSetOption
}

func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
res := app.app.DeliverTx(*req)
res := app.app.DeliverTx(req.Tx)
return &res, nil
}

func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
res := app.app.CheckTx(*req)
res := app.app.CheckTx(req.Tx)
return &res, nil
}

@@ -93,15 +93,15 @@ func ToRequestSetOption(req RequestSetOption) *Request {
}
}

func ToRequestDeliverTx(req RequestDeliverTx) *Request {
func ToRequestDeliverTx(tx []byte) *Request {
return &Request{
Value: &Request_DeliverTx{&req},
Value: &Request_DeliverTx{&RequestDeliverTx{Tx: tx}},
}
}

func ToRequestCheckTx(req RequestCheckTx) *Request {
func ToRequestCheckTx(tx []byte) *Request {
return &Request{
Value: &Request_CheckTx{&req},
Value: &Request_CheckTx{&RequestCheckTx{Tx: tx}},
}
}

@@ -8,26 +8,21 @@ import (

"github.com/gogo/protobuf/proto"
"github.com/stretchr/testify/assert"

cmn "github.com/tendermint/tendermint/libs/common"
)

func TestMarshalJSON(t *testing.T) {
b, err := json.Marshal(&ResponseDeliverTx{})
assert.Nil(t, err)
// include empty fields.
assert.True(t, strings.Contains(string(b), "code"))
// Do not include empty fields.
assert.False(t, strings.Contains(string(b), "code"))

r1 := ResponseCheckTx{
Code: 1,
Data: []byte("hello"),
GasWanted: 43,
Events: []Event{
{
Type: "testEvent",
Attributes: []cmn.KVPair{
{Key: []byte("pho"), Value: []byte("bo")},
},
},
Tags: []cmn.KVPair{
{Key: []byte("pho"), Value: []byte("bo")},
},
}
b, err = json.Marshal(&r1)
@@ -87,13 +82,8 @@ func TestWriteReadMessage2(t *testing.T) {
Data: []byte(phrase),
Log: phrase,
GasWanted: 10,
Events: []Event{
{
Type: "testEvent",
Attributes: []cmn.KVPair{
{Key: []byte("abc"), Value: []byte("def")},
},
},
Tags: []cmn.KVPair{
cmn.KVPair{Key: []byte("abc"), Value: []byte("def")},
},
},
// TODO: add the rest

@@ -40,7 +40,7 @@ func main() {
}
if writeImportTime && !wroteImport {
wroteImport = true
fmt.Fprintf(outFile, "import \"github.com/tendermint/go-amino/data\"\n")
fmt.Fprintf(outFile, "import \"github.com/tendermint/go-wire/data\"\n")

}
if gotPackageLine {

@@ -42,12 +42,14 @@ func (r ResponseQuery) IsErr() bool {
}

//---------------------------------------------------------------------------
// override JSON marshalling so we emit defaults (ie. disable omitempty)
// override JSON marshalling so we don't emit defaults (ie. enable omitempty)
// note we need Unmarshal functions too because protobuf had the bright idea
// to marshal int64->string. cool. cool, cool, cool: https://developers.google.com/protocol-buffers/docs/proto3#json

var (
jsonpbMarshaller = jsonpb.Marshaler{
EnumsAsInts: true,
EmitDefaults: true,
EmitDefaults: false,
}
jsonpbUnmarshaller = jsonpb.Unmarshaler{}
)
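The two marshaller configurations above differ only in `EmitDefaults`. A short sketch showing both settings side by side; the variable names are ours:

```go
package example

import "github.com/gogo/protobuf/jsonpb"

// emitting is the v0.32.5 configuration: zero values appear in the JSON
// output. omitting is the older branch's configuration, which leaves
// defaulted fields out. EnumsAsInts keeps enums numeric in both.
var (
	emitting = jsonpb.Marshaler{EnumsAsInts: true, EmitDefaults: true}
	omitting = jsonpb.Marshaler{EnumsAsInts: true, EmitDefaults: false}
)
```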

File diff suppressed because it is too large
@@ -4,9 +4,9 @@ package types;
// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto";
import "github.com/tendermint/tendermint/libs/common/types.proto";
import "google/protobuf/timestamp.proto";
import "github.com/tendermint/tendermint/libs/common/types.proto";
import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto";

// This file is copied from http://github.com/tendermint/abci
// NOTE: When using custom types, mind the warnings.
@@ -74,6 +74,7 @@ message RequestQuery {
bool prove = 4;
}

// NOTE: validators here have empty pubkeys.
message RequestBeginBlock {
bytes hash = 1;
Header header = 2 [(gogoproto.nullable)=false];
@@ -81,14 +82,8 @@ message RequestBeginBlock {
repeated Evidence byzantine_validators = 4 [(gogoproto.nullable)=false];
}

enum CheckTxType {
New = 0;
Recheck = 1;
}

message RequestCheckTx {
bytes tx = 1;
CheckTxType type = 2;
}

message RequestDeliverTx {
@@ -171,7 +166,7 @@ message ResponseQuery {
}

message ResponseBeginBlock {
repeated Event events = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"];
repeated common.KVPair tags = 1 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
}

message ResponseCheckTx {
@@ -181,7 +176,7 @@ message ResponseCheckTx {
string info = 4; // nondeterministic
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated Event events = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"];
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
string codespace = 8;
}

@@ -192,14 +187,14 @@ message ResponseDeliverTx {
string info = 4; // nondeterministic
int64 gas_wanted = 5;
int64 gas_used = 6;
repeated Event events = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"];
repeated common.KVPair tags = 7 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
string codespace = 8;
}

message ResponseEndBlock {
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable)=false];
ConsensusParams consensus_param_updates = 2;
repeated Event events = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="events,omitempty"];
repeated common.KVPair tags = 3 [(gogoproto.nullable)=false, (gogoproto.jsontag)="tags,omitempty"];
}

message ResponseCommit {
@@ -213,13 +208,12 @@ message ResponseCommit {
// ConsensusParams contains all consensus-relevant parameters
// that can be adjusted by the abci app
message ConsensusParams {
BlockParams block = 1;
EvidenceParams evidence = 2;
ValidatorParams validator = 3;
BlockSize block_size = 1;
EvidenceParams evidence_params = 2;
}

// BlockParams contains limits on the block size.
message BlockParams {
// BlockSize contains limits on the block size.
message BlockSize {
// Note: must be greater than 0
int64 max_bytes = 1;
// Note: must be greater or equal to -1
@@ -232,21 +226,11 @@ message EvidenceParams {
int64 max_age = 1;
}

// ValidatorParams contains limits on validators.
message ValidatorParams {
repeated string pub_key_types = 1;
}

message LastCommitInfo {
int32 round = 1;
repeated VoteInfo votes = 2 [(gogoproto.nullable)=false];
}

message Event {
string type = 1;
repeated common.KVPair attributes = 2 [(gogoproto.nullable)=false, (gogoproto.jsontag)="attributes,omitempty"];
}

//----------------------------------------
// Blockchain Types

@@ -3,21 +3,19 @@

package types

import (
fmt "fmt"
_ "github.com/gogo/protobuf/gogoproto"
github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
proto "github.com/gogo/protobuf/proto"
golang_proto "github.com/golang/protobuf/proto"
_ "github.com/golang/protobuf/ptypes/timestamp"
_ "github.com/tendermint/tendermint/crypto/merkle"
_ "github.com/tendermint/tendermint/libs/common"
math "math"
math_rand "math/rand"
testing "testing"
time "time"
)
import testing "testing"
import math_rand "math/rand"
import time "time"
import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
import github_com_gogo_protobuf_jsonpb "github.com/gogo/protobuf/jsonpb"
import proto "github.com/gogo/protobuf/proto"
import golang_proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import _ "github.com/golang/protobuf/ptypes/timestamp"
import _ "github.com/tendermint/tendermint/crypto/merkle"
import _ "github.com/tendermint/tendermint/libs/common"

// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
@@ -1481,15 +1479,15 @@ func TestConsensusParamsMarshalTo(t *testing.T) {
}
}

func TestBlockParamsProto(t *testing.T) {
func TestBlockSizeProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedBlockParams(popr, false)
p := NewPopulatedBlockSize(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &BlockParams{}
msg := &BlockSize{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
@@ -1512,10 +1510,10 @@ func TestBlockParamsProto(t *testing.T) {
}
}

func TestBlockParamsMarshalTo(t *testing.T) {
func TestBlockSizeMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedBlockParams(popr, false)
p := NewPopulatedBlockSize(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
@@ -1525,7 +1523,7 @@ func TestBlockParamsMarshalTo(t *testing.T) {
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &BlockParams{}
msg := &BlockSize{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
@@ -1593,62 +1591,6 @@ func TestEvidenceParamsMarshalTo(t *testing.T) {
}
}

func TestValidatorParamsProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedValidatorParams(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &ValidatorParams{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}

func TestValidatorParamsMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedValidatorParams(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &ValidatorParams{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}

func TestLastCommitInfoProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
@@ -1705,62 +1647,6 @@ func TestLastCommitInfoMarshalTo(t *testing.T) {
}
}

func TestEventProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedEvent(popr, false)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Event{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
littlefuzz := make([]byte, len(dAtA))
copy(littlefuzz, dAtA)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
if len(littlefuzz) > 0 {
fuzzamount := 100
for i := 0; i < fuzzamount; i++ {
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256))
littlefuzz = append(littlefuzz, byte(popr.Intn(256)))
}
// shouldn't panic
_ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg)
}
}

func TestEventMarshalTo(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedEvent(popr, false)
size := p.Size()
dAtA := make([]byte, size)
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
_, err := p.MarshalTo(dAtA)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Event{}
if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
for i := range dAtA {
dAtA[i] = byte(popr.Intn(256))
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}

func TestHeaderProto(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
@@ -2733,16 +2619,16 @@ func TestConsensusParamsJSON(t *testing.T) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestBlockParamsJSON(t *testing.T) {
func TestBlockSizeJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedBlockParams(popr, true)
p := NewPopulatedBlockSize(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &BlockParams{}
msg := &BlockSize{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
@@ -2769,24 +2655,6 @@ func TestEvidenceParamsJSON(t *testing.T) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestValidatorParamsJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedValidatorParams(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &ValidatorParams{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestLastCommitInfoJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
@@ -2805,24 +2673,6 @@ func TestLastCommitInfoJSON(t *testing.T) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestEventJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedEvent(popr, true)
marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{}
jsondata, err := marshaler.MarshalToString(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
msg := &Event{}
err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p)
}
}
func TestHeaderJSON(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
@@ -3713,12 +3563,12 @@ func TestConsensusParamsProtoCompactText(t *testing.T) {
}
}

func TestBlockParamsProtoText(t *testing.T) {
func TestBlockSizeProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedBlockParams(popr, true)
p := NewPopulatedBlockSize(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &BlockParams{}
msg := &BlockSize{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
@@ -3727,12 +3577,12 @@ func TestBlockParamsProtoText(t *testing.T) {
}
}

func TestBlockParamsProtoCompactText(t *testing.T) {
func TestBlockSizeProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedBlockParams(popr, true)
p := NewPopulatedBlockSize(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &BlockParams{}
msg := &BlockSize{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
@@ -3769,34 +3619,6 @@ func TestEvidenceParamsProtoCompactText(t *testing.T) {
}
}

func TestValidatorParamsProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedValidatorParams(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &ValidatorParams{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}

func TestValidatorParamsProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedValidatorParams(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &ValidatorParams{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}

func TestLastCommitInfoProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
@@ -3825,34 +3647,6 @@ func TestLastCommitInfoProtoCompactText(t *testing.T) {
}
}

func TestEventProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedEvent(popr, true)
dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p)
msg := &Event{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}

func TestEventProtoCompactText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedEvent(popr, true)
dAtA := github_com_gogo_protobuf_proto.CompactTextString(p)
msg := &Event{}
if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
if !p.Equal(msg) {
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p)
}
}

func TestHeaderProtoText(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
@@ -4677,10 +4471,10 @@ func TestConsensusParamsSize(t *testing.T) {
}
}

func TestBlockParamsSize(t *testing.T) {
func TestBlockSizeSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedBlockParams(popr, true)
p := NewPopulatedBlockSize(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
@@ -4721,28 +4515,6 @@ func TestEvidenceParamsSize(t *testing.T) {
}
}

func TestValidatorParamsSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedValidatorParams(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}

func TestLastCommitInfoSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
@@ -4765,28 +4537,6 @@ func TestLastCommitInfoSize(t *testing.T) {
}
}

func TestEventSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
p := NewPopulatedEvent(popr, true)
size2 := github_com_gogo_protobuf_proto.Size(p)
dAtA, err := github_com_gogo_protobuf_proto.Marshal(p)
if err != nil {
t.Fatalf("seed = %d, err = %v", seed, err)
}
size := p.Size()
if len(dAtA) != size {
t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))
}
if size2 != size {
t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2)
}
size3 := github_com_gogo_protobuf_proto.Size(p)
if size3 != size {
t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3)
}
}

func TestHeaderSize(t *testing.T) {
seed := time.Now().UnixNano()
popr := math_rand.New(math_rand.NewSource(seed))
@@ -7,6 +7,7 @@ clone_folder: c:\go\path\src\github.com\tendermint\tendermint
before_build:
- cmd: set GOPATH=%GOROOT%\path
- cmd: set PATH=%GOPATH%\bin;%PATH%
- cmd: make get_vendor_deps
build_script:
- cmd: make test
test: off
@@ -1,49 +0,0 @@
package behaviour

import (
"github.com/tendermint/tendermint/p2p"
)

// PeerBehaviour is a struct describing a behaviour a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behaviour performed by the peer.
type PeerBehaviour struct {
peerID p2p.ID
reason interface{}
}

type badMessage struct {
explanation string
}

// BadMessage returns a badMessage PeerBehaviour.
func BadMessage(peerID p2p.ID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: badMessage{explanation}}
}

type messageOutOfOrder struct {
explanation string
}

// MessageOutOfOrder returns a messageOutOfOrder PeerBehaviour.
func MessageOutOfOrder(peerID p2p.ID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: messageOutOfOrder{explanation}}
}

type consensusVote struct {
explanation string
}

// ConsensusVote returns a consensusVote PeerBehaviour.
func ConsensusVote(peerID p2p.ID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: consensusVote{explanation}}
}

type blockPart struct {
explanation string
}

// BlockPart returns a blockPart PeerBehaviour.
func BlockPart(peerID p2p.ID, explanation string) PeerBehaviour {
return PeerBehaviour{peerID: peerID, reason: blockPart{explanation}}
}
@@ -1,84 +0,0 @@
package behaviour

import (
"errors"
"sync"

"github.com/tendermint/tendermint/p2p"
)

// Reporter provides an interface for reactors to report the behaviour
// of peers synchronously to other components.
type Reporter interface {
Report(behaviour PeerBehaviour) error
}

// SwitchReporter reports peer behaviour to an internal Switch.
type SwitchReporter struct {
sw *p2p.Switch
}

// NewSwitchReporter returns a new SwitchReporter instance which wraps the Switch.
func NewSwitcReporter(sw *p2p.Switch) *SwitchReporter {
return &SwitchReporter{
sw: sw,
}
}

// Report reports the behaviour of a peer to the Switch.
func (spbr *SwitchReporter) Report(behaviour PeerBehaviour) error {
peer := spbr.sw.Peers().Get(behaviour.peerID)
if peer == nil {
return errors.New("peer not found")
}

switch reason := behaviour.reason.(type) {
case consensusVote, blockPart:
spbr.sw.MarkPeerAsGood(peer)
case badMessage:
spbr.sw.StopPeerForError(peer, reason.explanation)
case messageOutOfOrder:
spbr.sw.StopPeerForError(peer, reason.explanation)
default:
return errors.New("unknown reason reported")
}

return nil
}

// MockReporter is a concrete implementation of the Reporter
// interface used in reactor tests to ensure reactors report the correct
// behaviour in manufactured scenarios.
type MockReporter struct {
mtx sync.RWMutex
pb map[p2p.ID][]PeerBehaviour
}

// NewMockReporter returns a Reporter which records all reported
// behaviours in memory.
func NewMockReporter() *MockReporter {
return &MockReporter{
pb: map[p2p.ID][]PeerBehaviour{},
}
}

// Report stores the PeerBehaviour produced by the peer identified by peerID.
func (mpbr *MockReporter) Report(behaviour PeerBehaviour) {
mpbr.mtx.Lock()
defer mpbr.mtx.Unlock()
mpbr.pb[behaviour.peerID] = append(mpbr.pb[behaviour.peerID], behaviour)
}

// GetBehaviours returns all behaviours reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviours(peerID p2p.ID) []PeerBehaviour {
mpbr.mtx.RLock()
defer mpbr.mtx.RUnlock()
if items, ok := mpbr.pb[peerID]; ok {
result := make([]PeerBehaviour, len(items))
copy(result, items)

return result
} else {
return []PeerBehaviour{}
}
}
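A brief usage sketch of the removed MockReporter, assuming the package as it appears above: report one behaviour for a peer, then read it back by peer ID.

```go
package example

import (
	bh "github.com/tendermint/tendermint/behaviour"
	"github.com/tendermint/tendermint/p2p"
)

// recordAndRead sketches the MockReporter round trip exercised by the
// tests below; the peer ID and explanation are placeholders.
func recordAndRead() []bh.PeerBehaviour {
	var peerID p2p.ID = "peer-1"
	pr := bh.NewMockReporter()
	pr.Report(bh.BadMessage(peerID, "malformed vote"))
	return pr.GetBehaviours(peerID)
}
```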
@@ -1,186 +0,0 @@
package behaviour_test

import (
"sync"
"testing"

bh "github.com/tendermint/tendermint/behaviour"
"github.com/tendermint/tendermint/p2p"
)

// TestMockReporter tests the MockReporter's ability to store reported
// peer behaviour in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID p2p.ID = "MockPeer"
pr := bh.NewMockReporter()

behaviours := pr.GetBehaviours(peerID)
if len(behaviours) != 0 {
t.Error("Expected to have no behaviours reported")
}

badMessage := bh.BadMessage(peerID, "bad message")
pr.Report(badMessage)
behaviours = pr.GetBehaviours(peerID)
if len(behaviours) != 1 {
t.Error("Expected the peer to have one reported behaviour")
}

if behaviours[0] != badMessage {
t.Error("Expected Bad Message to have been reported")
}
}

type scriptItem struct {
peerID p2p.ID
behaviour bh.PeerBehaviour
}

// equalBehaviours returns true if a and b contain the same PeerBehaviours with
// the same frequencies and otherwise false.
func equalBehaviours(a []bh.PeerBehaviour, b []bh.PeerBehaviour) bool {
aHistogram := map[bh.PeerBehaviour]int{}
bHistogram := map[bh.PeerBehaviour]int{}

for _, behaviour := range a {
aHistogram[behaviour] += 1
}

for _, behaviour := range b {
bHistogram[behaviour] += 1
}

if len(aHistogram) != len(bHistogram) {
return false
}

for _, behaviour := range a {
if aHistogram[behaviour] != bHistogram[behaviour] {
return false
}
}

for _, behaviour := range b {
if bHistogram[behaviour] != aHistogram[behaviour] {
return false
}
}

return true
}

// TestEqualPeerBehaviours tests that equalBehaviours can tell that two slices
// of peer behaviours can be compared for the behaviours they contain and the
// frequencies that those behaviours occur.
func TestEqualPeerBehaviours(t *testing.T) {
var (
peerID p2p.ID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehaviour
right []bh.PeerBehaviour
}{
// Empty sets
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{}},
// Single behaviours
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{consensusVote}},
// Equal Frequencies
{[]bh.PeerBehaviour{consensusVote, consensusVote},
[]bh.PeerBehaviour{consensusVote, consensusVote}},
// Equal frequencies different orders
{[]bh.PeerBehaviour{consensusVote, blockPart},
[]bh.PeerBehaviour{blockPart, consensusVote}},
}
unequals = []struct {
left []bh.PeerBehaviour
right []bh.PeerBehaviour
}{
// Comparing empty sets to non empty sets
{[]bh.PeerBehaviour{}, []bh.PeerBehaviour{consensusVote}},
// Different behaviours
{[]bh.PeerBehaviour{consensusVote}, []bh.PeerBehaviour{blockPart}},
// Same behaviour with different frequencies
{[]bh.PeerBehaviour{consensusVote},
[]bh.PeerBehaviour{consensusVote, consensusVote}},
}
)

for _, test := range equals {
if !equalBehaviours(test.left, test.right) {
t.Errorf("Expected %#v and %#v to be equal", test.left, test.right)
}
}

for _, test := range unequals {
if equalBehaviours(test.left, test.right) {
t.Errorf("Expected %#v and %#v to be unequal", test.left, test.right)
}
}
}

// TestPeerBehaviourConcurrency constructs a scenario in which
// multiple goroutines are using the same MockReporter instance.
// This test reproduces the conditions in which MockReporter will
// be used within a Reactor `Receive` method tests to ensure thread safety.
func TestMockPeerBehaviourReporterConcurrency(t *testing.T) {
var (
behaviourScript = []struct {
peerID p2p.ID
behaviours []bh.PeerBehaviour
}{
{"1", []bh.PeerBehaviour{bh.ConsensusVote("1", "")}},
{"2", []bh.PeerBehaviour{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}},
{"3", []bh.PeerBehaviour{bh.BlockPart("3", ""), bh.ConsensusVote("3", ""), bh.BlockPart("3", ""), bh.ConsensusVote("3", "")}},
{"4", []bh.PeerBehaviour{bh.ConsensusVote("4", ""), bh.ConsensusVote("4", ""), bh.ConsensusVote("4", ""), bh.ConsensusVote("4", "")}},
{"5", []bh.PeerBehaviour{bh.BlockPart("5", ""), bh.ConsensusVote("5", ""), bh.BlockPart("5", ""), bh.ConsensusVote("5", "")}},
}
)

var receiveWg sync.WaitGroup
pr := bh.NewMockReporter()
scriptItems := make(chan scriptItem)
done := make(chan int)
numConsumers := 3
for i := 0; i < numConsumers; i++ {
receiveWg.Add(1)
go func() {
defer receiveWg.Done()
for {
select {
case pb := <-scriptItems:
pr.Report(pb.behaviour)
case <-done:
return
}
}
}()
}

var sendingWg sync.WaitGroup
sendingWg.Add(1)
go func() {
defer sendingWg.Done()
for _, item := range behaviourScript {
for _, reason := range item.behaviours {
scriptItems <- scriptItem{item.peerID, reason}
}
}
}()

sendingWg.Wait()

for i := 0; i < numConsumers; i++ {
done <- 1
}

receiveWg.Wait()

for _, items := range behaviourScript {
reported := pr.GetBehaviours(items.peerID)
if !equalBehaviours(reported, items.behaviours) {
t.Errorf("Expected peer %s to have behaved \nExpected: %#v \nGot %#v \n",
items.peerID, items.behaviours, reported)
}
}
}
||||
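For context on how this mock is meant to be consumed, here is a minimal sketch, assuming the `github.com/tendermint/tendermint/behaviour` import path and a hypothetical `testReactor` type (both illustrative, not part of this changeset): a reactor under test holds a MockReporter where its production counterpart holds the real reporter, and the test asserts on the recorded behaviours.

package behaviour_test

import (
    "testing"

    bh "github.com/tendermint/tendermint/behaviour"
    "github.com/tendermint/tendermint/p2p"
)

// testReactor is a hypothetical reactor that reports misbehaving peers.
type testReactor struct {
    reporter *bh.MockReporter
}

// Receive reports a bad message for the sending peer, mirroring how a real
// reactor would report from within its Receive method.
func (r *testReactor) Receive(chID byte, peerID p2p.ID, msgBytes []byte) {
    if len(msgBytes) == 0 {
        r.reporter.Report(bh.BadMessage(peerID, "empty message"))
    }
}

func TestReceiveReportsBadMessage(t *testing.T) {
    pr := bh.NewMockReporter()
    r := &testReactor{reporter: pr}
    r.Receive(0x01, "MockPeer", nil)

    if got := pr.GetBehaviours("MockPeer"); len(got) != 1 {
        t.Errorf("expected 1 reported behaviour, got %d", len(got))
    }
}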
@@ -4,7 +4,7 @@ import (
    "testing"
    "time"

    amino "github.com/tendermint/go-amino"
    "github.com/tendermint/go-amino"

    proto "github.com/tendermint/tendermint/benchmarks/proto"
    "github.com/tendermint/tendermint/crypto/ed25519"
@@ -14,7 +14,7 @@ import (

func testNodeInfo(id p2p.ID) p2p.DefaultNodeInfo {
    return p2p.DefaultNodeInfo{
        ProtocolVersion: p2p.ProtocolVersion{P2P: 1, Block: 2, App: 3},
        ProtocolVersion: p2p.ProtocolVersion{1, 2, 3},
        ID_:             id,
        Moniker:         "SOMENAME",
        Network:         "SOMENAME",
@@ -1,4 +1,4 @@
package v0
package blockchain

import (
    "errors"
@@ -59,7 +59,6 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
    are not at peer limits, we can probably switch to consensus reactor
*/

// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
    cmn.BaseService
    startTime time.Time
@@ -70,7 +69,7 @@ type BlockPool struct {
    height int64 // the lowest key in requesters.
    // peers
    peers         map[p2p.ID]*bpPeer
    maxPeerHeight int64 // the biggest reported height
    maxPeerHeight int64

    // atomic
    numPending int32 // number of requests pending assignment or block response
@@ -79,8 +78,6 @@ type BlockPool struct {
    errorsCh chan<- peerError
}

// NewBlockPool returns a new BlockPool with the height equal to start. Block
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
    bp := &BlockPool{
        peers: make(map[p2p.ID]*bpPeer),
@@ -96,15 +93,15 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
    return bp
}

// OnStart implements cmn.Service by spawning requesters routine and recording
// pool's start time.
func (pool *BlockPool) OnStart() error {
    go pool.makeRequestersRoutine()
    pool.startTime = time.Now()
    return nil
}

// spawns requesters as needed
func (pool *BlockPool) OnStop() {}

// Run spawns requesters as needed.
func (pool *BlockPool) makeRequestersRoutine() {
    for {
        if !pool.IsRunning() {
@@ -112,18 +109,17 @@ func (pool *BlockPool) makeRequestersRoutine() {
        }

        _, numPending, lenRequesters := pool.GetStatus()
        switch {
        case numPending >= maxPendingRequests:
        if numPending >= maxPendingRequests {
            // sleep for a bit.
            time.Sleep(requestIntervalMS * time.Millisecond)
            // check for timed out peers
            pool.removeTimedoutPeers()
        case lenRequesters >= maxTotalRequesters:
        } else if lenRequesters >= maxTotalRequesters {
            // sleep for a bit.
            time.Sleep(requestIntervalMS * time.Millisecond)
            // check for timed out peers
            pool.removeTimedoutPeers()
        default:
        } else {
            // request for more blocks.
            pool.makeNextRequester()
        }
@@ -154,8 +150,6 @@ func (pool *BlockPool) removeTimedoutPeers() {
    }
}

// GetStatus returns pool's height, numPending requests and the number of
// requesters.
func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
@@ -163,7 +157,6 @@ func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequester
    return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters)
}

// IsCaughtUp returns true if this node is caught up, false otherwise.
// TODO: relax conditions, prevent abuse.
func (pool *BlockPool) IsCaughtUp() bool {
    pool.mtx.Lock()
@@ -175,18 +168,13 @@ func (pool *BlockPool) IsCaughtUp() bool {
        return false
    }

    // Some conditions to determine if we're caught up.
    // Ensures we've either received a block or waited some amount of time,
    // and that we're synced to the highest known height.
    // Note we use maxPeerHeight - 1 because to sync block H requires block H+1
    // to verify the LastCommit.
    receivedBlockOrTimedOut := pool.height > 0 || time.Since(pool.startTime) > 5*time.Second
    ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= (pool.maxPeerHeight-1)
    // some conditions to determine if we're caught up
    receivedBlockOrTimedOut := (pool.height > 0 || time.Since(pool.startTime) > 5*time.Second)
    ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
    isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers
    return isCaughtUp
}

// PeekTwoBlocks returns blocks at pool.height and pool.height+1.
// We need to see the second block's Commit to validate the first block.
// So we peek two blocks at a time.
// The caller will verify the commit.
@@ -203,7 +191,7 @@ func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block)
    return
}

// PopRequest pops the first block at pool.height.
// Pop the first block at pool.height
// It must have been validated by 'second'.Commit from PeekTwoBlocks().
func (pool *BlockPool) PopRequest() {
    pool.mtx.Lock()
@@ -223,7 +211,7 @@ func (pool *BlockPool) PopRequest() {
    }
}

// RedoRequest invalidates the block at pool.height,
// Invalidates the block at pool.height,
// Remove the peer and redo request from others.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
@@ -239,7 +227,6 @@ func (pool *BlockPool) RedoRequest(height int64) p2p.ID {
    return peerID
}

// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) {
    pool.mtx.Lock()
@@ -265,19 +252,18 @@ func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int
            peer.decrPending(blockSize)
        }
    } else {
        pool.Logger.Info("invalid peer", "peer", peerID, "blockHeight", block.Height)
        pool.sendError(errors.New("invalid peer"), peerID)
        // Bad peer?
    }
}

// MaxPeerHeight returns the highest reported height.
// MaxPeerHeight returns the highest height reported by a peer.
func (pool *BlockPool) MaxPeerHeight() int64 {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
    return pool.maxPeerHeight
}

// SetPeerHeight sets the peer's alleged blockchain height.
// Sets the peer's alleged blockchain height.
func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
@@ -296,8 +282,6 @@ func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) {
    }
}

// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
@@ -308,35 +292,10 @@ func (pool *BlockPool) RemovePeer(peerID p2p.ID) {
func (pool *BlockPool) removePeer(peerID p2p.ID) {
    for _, requester := range pool.requesters {
        if requester.getPeerID() == peerID {
            requester.redo(peerID)
            requester.redo()
        }
    }

    peer, ok := pool.peers[peerID]
    if ok {
        if peer.timeout != nil {
            peer.timeout.Stop()
        }

        delete(pool.peers, peerID)

        // Find a new peer with the biggest height and update maxPeerHeight if the
        // peer's height was the biggest.
        if peer.height == pool.maxPeerHeight {
            pool.updateMaxPeerHeight()
        }
    }
}

// If no peers are left, maxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
    var max int64
    for _, peer := range pool.peers {
        if peer.height > max {
            max = peer.height
        }
    }
    pool.maxPeerHeight = max
    delete(pool.peers, peerID)
}

// Pick an available peer with at least the given minHeight.
@@ -367,11 +326,8 @@ func (pool *BlockPool) makeNextRequester() {
    defer pool.mtx.Unlock()

    nextHeight := pool.height + pool.requestersLen()
    if nextHeight > pool.maxPeerHeight {
        return
    }

    request := newBPRequester(pool, nextHeight)
    // request.SetLogger(pool.Logger.With("height", nextHeight))

    pool.requesters[nextHeight] = request
    atomic.AddInt32(&pool.numPending, 1)
@@ -400,8 +356,7 @@ func (pool *BlockPool) sendError(err error, peerID p2p.ID) {
    pool.errorsCh <- peerError{err, peerID}
}

// for debugging purposes
//nolint:unused
// unused by tendermint; left for debugging purposes
func (pool *BlockPool) debug() string {
    pool.mtx.Lock()
    defer pool.mtx.Unlock()
@@ -498,7 +453,7 @@ type bpRequester struct {
    pool       *BlockPool
    height     int64
    gotBlockCh chan struct{}
    redoCh     chan p2p.ID // redo may be sent multiple times; the peerID identifies repeats
    redoCh     chan struct{}

    mtx    sync.Mutex
    peerID p2p.ID
@@ -510,7 +465,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
        pool:       pool,
        height:     height,
        gotBlockCh: make(chan struct{}, 1),
        redoCh:     make(chan p2p.ID, 1),
        redoCh:     make(chan struct{}, 1),

        peerID: "",
        block:  nil,
@@ -569,9 +524,9 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerID p2p.ID) {
func (bpr *bpRequester) redo() {
    select {
    case bpr.redoCh <- peerID:
    case bpr.redoCh <- struct{}{}:
    default:
    }
}
@@ -610,13 +565,9 @@ OUTER_LOOP:
            return
        case <-bpr.Quit():
            return
        case peerID := <-bpr.redoCh:
            if peerID == bpr.peerID {
                bpr.reset()
                continue OUTER_LOOP
            } else {
                continue WAIT_LOOP
            }
        case <-bpr.redoCh:
            bpr.reset()
            continue OUTER_LOOP
        case <-bpr.gotBlockCh:
            // We got a block!
            // Continue the for-loop and wait til Quit.
@@ -626,8 +577,8 @@ OUTER_LOOP:
        }
    }
}

// BlockRequest stores a block request identified by the block Height and the PeerID responsible for
// delivering the block
//-------------------------------------

type BlockRequest struct {
    Height int64
    PeerID p2p.ID
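The `redoCh` change above hinges on a non-blocking send into a one-slot buffered channel, so repeated redo requests collapse into a single pending signal. A self-contained sketch of that pattern (illustrative names, not code from this diff):

package main

import "fmt"

// signalOnce delivers at most one pending signal: sends while a signal is
// already queued are dropped, mirroring bpRequester.redo's select/default.
type signalOnce struct {
    ch chan struct{}
}

func newSignalOnce() *signalOnce {
    return &signalOnce{ch: make(chan struct{}, 1)} // capacity 1: one pending signal
}

func (s *signalOnce) signal() {
    select {
    case s.ch <- struct{}{}: // queued the signal
    default: // a signal is already pending; drop this one
    }
}

func main() {
    s := newSignalOnce()
    s.signal()
    s.signal() // dropped: already pending
    <-s.ch     // consume the single pending signal
    select {
    case <-s.ch:
        fmt.Println("unexpected second signal")
    default:
        fmt.Println("exactly one signal was delivered")
    }
}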
blockchain/pool_test.go (new file, 147 lines)
@@ -0,0 +1,147 @@
package blockchain

import (
    "testing"
    "time"

    cmn "github.com/tendermint/tendermint/libs/common"
    "github.com/tendermint/tendermint/libs/log"

    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

func init() {
    peerTimeout = 2 * time.Second
}

type testPeer struct {
    id     p2p.ID
    height int64
}

func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer {
    peers := make(map[p2p.ID]testPeer, numPeers)
    for i := 0; i < numPeers; i++ {
        peerID := p2p.ID(cmn.RandStr(12))
        height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
        peers[peerID] = testPeer{peerID, height}
    }
    return peers
}

func TestBasic(t *testing.T) {
    start := int64(42)
    peers := makePeers(10, start+1, 1000)
    errorsCh := make(chan peerError, 1000)
    requestsCh := make(chan BlockRequest, 1000)
    pool := NewBlockPool(start, requestsCh, errorsCh)
    pool.SetLogger(log.TestingLogger())

    err := pool.Start()
    if err != nil {
        t.Error(err)
    }

    defer pool.Stop()

    // Introduce each peer.
    go func() {
        for _, peer := range peers {
            pool.SetPeerHeight(peer.id, peer.height)
        }
    }()

    // Start a goroutine to pull blocks
    go func() {
        for {
            if !pool.IsRunning() {
                return
            }
            first, second := pool.PeekTwoBlocks()
            if first != nil && second != nil {
                pool.PopRequest()
            } else {
                time.Sleep(1 * time.Second)
            }
        }
    }()

    // Pull from channels
    for {
        select {
        case err := <-errorsCh:
            t.Error(err)
        case request := <-requestsCh:
            t.Logf("Pulled new BlockRequest %v", request)
            if request.Height == 300 {
                return // Done!
            }
            // Request desired, pretend like we got the block immediately.
            go func() {
                block := &types.Block{Header: types.Header{Height: request.Height}}
                pool.AddBlock(request.PeerID, block, 123)
                t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height)
            }()
        }
    }
}

func TestTimeout(t *testing.T) {
    start := int64(42)
    peers := makePeers(10, start+1, 1000)
    errorsCh := make(chan peerError, 1000)
    requestsCh := make(chan BlockRequest, 1000)
    pool := NewBlockPool(start, requestsCh, errorsCh)
    pool.SetLogger(log.TestingLogger())
    err := pool.Start()
    if err != nil {
        t.Error(err)
    }
    defer pool.Stop()

    for _, peer := range peers {
        t.Logf("Peer %v", peer.id)
    }

    // Introduce each peer.
    go func() {
        for _, peer := range peers {
            pool.SetPeerHeight(peer.id, peer.height)
        }
    }()

    // Start a goroutine to pull blocks
    go func() {
        for {
            if !pool.IsRunning() {
                return
            }
            first, second := pool.PeekTwoBlocks()
            if first != nil && second != nil {
                pool.PopRequest()
            } else {
                time.Sleep(1 * time.Second)
            }
        }
    }()

    // Pull from channels
    counter := 0
    timedOut := map[p2p.ID]struct{}{}
    for {
        select {
        case err := <-errorsCh:
            t.Log(err)
            // consider error to be always timeout here
            if _, ok := timedOut[err.peerID]; !ok {
                counter++
                if counter == len(peers) {
                    return // Done!
                }
            }
        case request := <-requestsCh:
            t.Logf("Pulled new BlockRequest %+v", request)
        }
    }
}
@@ -1,17 +1,16 @@
package v0
package blockchain

import (
    "errors"
    "fmt"
    "reflect"
    "time"

    amino "github.com/tendermint/go-amino"

    cmn "github.com/tendermint/tendermint/libs/common"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/p2p"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/store"
    "github.com/tendermint/tendermint/types"
)

@@ -61,7 +60,7 @@ type BlockchainReactor struct {
    initialState sm.State

    blockExec *sm.BlockExecutor
    store     *store.BlockStore
    store     *BlockStore
    pool      *BlockPool
    fastSync  bool

@@ -70,7 +69,7 @@ type BlockchainReactor struct {
}

// NewBlockchainReactor returns new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore,
    fastSync bool) *BlockchainReactor {

    if state.LastBlockHeight != store.Height() {
@@ -141,9 +140,9 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
// AddPeer implements Reactor by sending our state to peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
    msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
    peer.Send(BlockchainChannel, msgBytes)
    // it's OK if send fails. will try later in poolRoutine

    if !peer.Send(BlockchainChannel, msgBytes) {
        // doing nothing, will try later in `poolRoutine`
    }
    // peer is added to the pool once we receive the first
    // bcStatusResponseMessage from the peer and call pool.SetPeerHeight
}
@@ -181,23 +180,23 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte)
        return
    }

    if err = msg.ValidateBasic(); err != nil {
        bcR.Logger.Error("Peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
        bcR.Switch.StopPeerForError(src, err)
        return
    }

    bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)

    switch msg := msg.(type) {
    case *bcBlockRequestMessage:
        bcR.respondToPeer(msg, src)
        if queued := bcR.respondToPeer(msg, src); !queued {
            // Unfortunately not queued since the queue is full.
        }
    case *bcBlockResponseMessage:
        // Got a block.
        bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes))
    case *bcStatusRequestMessage:
        // Send peer our state.
        msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
        src.TrySend(BlockchainChannel, msgBytes)
        queued := src.TrySend(BlockchainChannel, msgBytes)
        if !queued {
            // sorry
        }
    case *bcStatusResponseMessage:
        // Got a peer status. Unverified.
        bcR.pool.SetPeerHeight(src.ID(), msg.Height)
@@ -224,40 +223,32 @@ func (bcR *BlockchainReactor) poolRoutine() {

    didProcessCh := make(chan struct{}, 1)

    go func() {
        for {
            select {
            case <-bcR.Quit():
                return
            case <-bcR.pool.Quit():
                return
            case request := <-bcR.requestsCh:
                peer := bcR.Switch.Peers().Get(request.PeerID)
                if peer == nil {
                    continue
                }
                msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
                queued := peer.TrySend(BlockchainChannel, msgBytes)
                if !queued {
                    bcR.Logger.Debug("Send queue is full, drop block request", "peer", peer.ID(), "height", request.Height)
                }
            case err := <-bcR.errorsCh:
                peer := bcR.Switch.Peers().Get(err.peerID)
                if peer != nil {
                    bcR.Switch.StopPeerForError(peer, err)
                }

            case <-statusUpdateTicker.C:
                // ask for status updates
                go bcR.BroadcastStatusRequest() // nolint: errcheck

            }
        }
    }()

FOR_LOOP:
    for {
        select {
        case request := <-bcR.requestsCh:
            peer := bcR.Switch.Peers().Get(request.PeerID)
            if peer == nil {
                continue FOR_LOOP // Peer has since been disconnected.
            }
            msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height})
            queued := peer.TrySend(BlockchainChannel, msgBytes)
            if !queued {
                // We couldn't make the request, send-queue full.
                // The pool handles timeouts, just let it go.
                continue FOR_LOOP
            }

        case err := <-bcR.errorsCh:
            peer := bcR.Switch.Peers().Get(err.peerID)
            if peer != nil {
                bcR.Switch.StopPeerForError(peer, err)
            }

        case <-statusUpdateTicker.C:
            // ask for status updates
            go bcR.BroadcastStatusRequest() // nolint: errcheck

        case <-switchToConsensusTicker.C:
            height, numPending, lenRequesters := bcR.pool.GetStatus()
            outbound, inbound, _ := bcR.Switch.NumPeers()
@@ -266,13 +257,9 @@ FOR_LOOP:
            if bcR.pool.IsCaughtUp() {
                bcR.Logger.Info("Time to switch to consensus reactor!", "height", height)
                bcR.pool.Stop()
                conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
                if ok {
                    conR.SwitchToConsensus(state, blocksSynced)
                }
                // else {
                // should only happen during testing
                // }

                conR := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
                conR.SwitchToConsensus(state, blocksSynced)

                break FOR_LOOP
            }
@@ -305,7 +292,7 @@ FOR_LOOP:

                firstParts := first.MakePartSet(types.BlockPartSizeBytes)
                firstPartsHeader := firstParts.Header()
                firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
                firstID := types.BlockID{first.Hash(), firstPartsHeader}
                // Finally, verify the first block using the second's commit
                // NOTE: we can probably make this more efficient, but note that calling
                // first.Hash() doesn't verify the tx contents, so MakePartSet() is
@@ -321,13 +308,6 @@ FOR_LOOP:
                        // still need to clean up the rest.
                        bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err))
                    }
                    peerID2 := bcR.pool.RedoRequest(second.Height)
                    peer2 := bcR.Switch.Peers().Get(peerID2)
                    if peer2 != nil && peer2 != peer {
                        // NOTE: we've already removed the peer's request, but we
                        // still need to clean up the rest.
                        bcR.Switch.StopPeerForError(peer2, fmt.Errorf("BlockchainReactor validation error: %v", err))
                    }
                    continue FOR_LOOP
                } else {
                    bcR.pool.PopRequest()
@@ -341,7 +321,8 @@ FOR_LOOP:
                    state, err = bcR.blockExec.ApplyBlock(state, firstID, first)
                    if err != nil {
                        // TODO This is bad, are we zombie?
                        panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
                        cmn.PanicQ(fmt.Sprintf("Failed to process committed block (%d:%X): %v",
                            first.Height, first.Hash(), err))
                    }
                    blocksSynced++

@@ -371,11 +352,8 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error {
// Messages

// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface {
    ValidateBasic() error
}
type BlockchainMessage interface{}

// RegisterBlockchainMessages registers the fast sync messages for amino encoding.
func RegisterBlockchainMessages(cdc *amino.Codec) {
    cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
    cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
@@ -399,14 +377,6 @@ type bcBlockRequestMessage struct {
    Height int64
}

// ValidateBasic performs basic validation.
func (m *bcBlockRequestMessage) ValidateBasic() error {
    if m.Height < 0 {
        return errors.New("Negative Height")
    }
    return nil
}

func (m *bcBlockRequestMessage) String() string {
    return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
}
@@ -415,16 +385,8 @@ type bcNoBlockResponseMessage struct {
    Height int64
}

// ValidateBasic performs basic validation.
func (m *bcNoBlockResponseMessage) ValidateBasic() error {
    if m.Height < 0 {
        return errors.New("Negative Height")
    }
    return nil
}

func (m *bcNoBlockResponseMessage) String() string {
    return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height)
func (brm *bcNoBlockResponseMessage) String() string {
    return fmt.Sprintf("[bcNoBlockResponseMessage %d]", brm.Height)
}

//-------------------------------------
@@ -433,11 +395,6 @@ type bcBlockResponseMessage struct {
    Block *types.Block
}

// ValidateBasic performs basic validation.
func (m *bcBlockResponseMessage) ValidateBasic() error {
    return m.Block.ValidateBasic()
}

func (m *bcBlockResponseMessage) String() string {
    return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
}
@@ -448,14 +405,6 @@ type bcStatusRequestMessage struct {
    Height int64
}

// ValidateBasic performs basic validation.
func (m *bcStatusRequestMessage) ValidateBasic() error {
    if m.Height < 0 {
        return errors.New("Negative Height")
    }
    return nil
}

func (m *bcStatusRequestMessage) String() string {
    return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
}
@@ -466,14 +415,6 @@ type bcStatusResponseMessage struct {
    Height int64
}

// ValidateBasic performs basic validation.
func (m *bcStatusResponseMessage) ValidateBasic() error {
    if m.Height < 0 {
        return errors.New("Negative Height")
    }
    return nil
}

func (m *bcStatusResponseMessage) String() string {
    return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
}
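The v0.32 side of this hunk gates every decoded message through `ValidateBasic` before the type switch acts on it. A minimal standalone sketch of that decode-then-validate shape (the types here are illustrative, not the reactor's real ones):

package main

import (
    "errors"
    "fmt"
)

// message mirrors the BlockchainMessage contract on the v0.32 side: every
// concrete message validates itself before the reactor acts on it.
type message interface {
    ValidateBasic() error
}

type blockRequest struct {
    Height int64
}

func (m *blockRequest) ValidateBasic() error {
    if m.Height < 0 {
        return errors.New("negative Height")
    }
    return nil
}

// handle rejects invalid messages up front, as Receive does before its
// type switch on the concrete message.
func handle(msg message) error {
    if err := msg.ValidateBasic(); err != nil {
        return fmt.Errorf("peer sent us invalid msg: %w", err)
    }
    // ... dispatch on the concrete message type here ...
    return nil
}

func main() {
    fmt.Println(handle(&blockRequest{Height: -1})) // peer sent us invalid msg: negative Height
    fmt.Println(handle(&blockRequest{Height: 10})) // <nil>
}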
blockchain/reactor_test.go (new file, 209 lines)
@@ -0,0 +1,209 @@
package blockchain

import (
    "net"
    "testing"

    cmn "github.com/tendermint/tendermint/libs/common"
    dbm "github.com/tendermint/tendermint/libs/db"
    "github.com/tendermint/tendermint/libs/log"

    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/proxy"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
)

func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) {
    config := cfg.ResetTestRoot("blockchain_reactor_test")
    // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
    // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
    blockDB := dbm.NewMemDB()
    stateDB := dbm.NewMemDB()
    blockStore := NewBlockStore(blockDB)
    state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
    if err != nil {
        panic(cmn.ErrorWrap(err, "error constructing state from genesis file"))
    }
    return state, blockStore
}

func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainReactor {
    state, blockStore := makeStateAndBlockStore(logger)

    // Make the blockchainReactor itself
    fastSync := true
    var nilApp proxy.AppConnConsensus
    blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nilApp,
        sm.MockMempool{}, sm.MockEvidencePool{})

    bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
    bcReactor.SetLogger(logger.With("module", "blockchain"))

    // Next: we need to set a switch in order for peers to be added in
    bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig(), nil)

    // Lastly: let's add some blocks in
    for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
        firstBlock := makeBlock(blockHeight, state)
        secondBlock := makeBlock(blockHeight+1, state)
        firstParts := firstBlock.MakePartSet(types.BlockPartSizeBytes)
        blockStore.SaveBlock(firstBlock, firstParts, secondBlock.LastCommit)
    }

    return bcReactor
}

func TestNoBlockResponse(t *testing.T) {
    maxBlockHeight := int64(20)

    bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight)
    bcr.Start()
    defer bcr.Stop()

    // Add some peers in
    peer := newbcrTestPeer(p2p.ID(cmn.RandStr(12)))
    bcr.AddPeer(peer)

    chID := byte(0x01)

    tests := []struct {
        height   int64
        existent bool
    }{
        {maxBlockHeight + 2, false},
        {10, true},
        {1, true},
        {100, false},
    }

    // receive a request message from peer,
    // wait for our response to be received on the peer
    for _, tt := range tests {
        reqBlockMsg := &bcBlockRequestMessage{tt.height}
        reqBlockBytes := cdc.MustMarshalBinaryBare(reqBlockMsg)
        bcr.Receive(chID, peer, reqBlockBytes)
        msg := peer.lastBlockchainMessage()

        if tt.existent {
            if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok {
                t.Fatalf("Expected to receive a block response for height %d", tt.height)
            } else if blockMsg.Block.Height != tt.height {
                t.Fatalf("Expected response to be for height %d, got %d", tt.height, blockMsg.Block.Height)
            }
        } else {
            if noBlockMsg, ok := msg.(*bcNoBlockResponseMessage); !ok {
                t.Fatalf("Expected to receive a no block response for height %d", tt.height)
            } else if noBlockMsg.Height != tt.height {
                t.Fatalf("Expected response to be for height %d, got %d", tt.height, noBlockMsg.Height)
            }
        }
    }
}

/*
// NOTE: This is too hard to test without
// an easy way to add test peer to switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
    maxBlockHeight := int64(20)

    bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight)
    bcr.Start()
    defer bcr.Stop()

    // Add some peers in
    peer := newbcrTestPeer(p2p.ID(cmn.RandStr(12)))

    // XXX: This doesn't add the peer to anything,
    // so it's hard to check that it's later removed
    bcr.AddPeer(peer)
    assert.True(t, bcr.Switch.Peers().Size() > 0)

    // send a bad block from the peer
    // default blocks already don't have commits, so should fail
    block := bcr.store.LoadBlock(3)
    msg := &bcBlockResponseMessage{Block: block}
    peer.Send(BlockchainChannel, struct{ BlockchainMessage }{msg})

    ticker := time.NewTicker(time.Millisecond * 10)
    timer := time.NewTimer(time.Second * 2)
LOOP:
    for {
        select {
        case <-ticker.C:
            if bcr.Switch.Peers().Size() == 0 {
                break LOOP
            }
        case <-timer.C:
            t.Fatal("Timed out waiting to disconnect peer")
        }
    }
}
*/

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
    for i := 0; i < 10; i++ {
        txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
    }
    return txs
}

func makeBlock(height int64, state sm.State) *types.Block {
    block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit), nil, state.Validators.GetProposer().Address)
    return block
}

// The test peer
type bcrTestPeer struct {
    cmn.BaseService
    id p2p.ID
    ch chan interface{}
}

var _ p2p.Peer = (*bcrTestPeer)(nil)

func newbcrTestPeer(id p2p.ID) *bcrTestPeer {
    bcr := &bcrTestPeer{
        id: id,
        ch: make(chan interface{}, 2),
    }
    bcr.BaseService = *cmn.NewBaseService(nil, "bcrTestPeer", bcr)
    return bcr
}

func (tp *bcrTestPeer) lastBlockchainMessage() interface{} { return <-tp.ch }

func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool {
    var msg BlockchainMessage
    err := cdc.UnmarshalBinaryBare(msgBytes, &msg)
    if err != nil {
        panic(cmn.ErrorWrap(err, "Error while trying to parse a BlockchainMessage"))
    }
    if _, ok := msg.(*bcStatusResponseMessage); ok {
        // Discard status response messages since they skew our results
        // We only want to deal with:
        // + bcBlockResponseMessage
        // + bcNoBlockResponseMessage
    } else {
        tp.ch <- msg
    }
    return true
}

func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) }
func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo               { return p2p.DefaultNodeInfo{} }
func (tp *bcrTestPeer) Status() p2p.ConnectionStatus         { return p2p.ConnectionStatus{} }
func (tp *bcrTestPeer) ID() p2p.ID                           { return tp.id }
func (tp *bcrTestPeer) IsOutbound() bool                     { return false }
func (tp *bcrTestPeer) IsPersistent() bool                   { return true }
func (tp *bcrTestPeer) Get(s string) interface{}             { return s }
func (tp *bcrTestPeer) Set(string, interface{})              {}
func (tp *bcrTestPeer) RemoteIP() net.IP                     { return []byte{127, 0, 0, 1} }
func (tp *bcrTestPeer) OriginalAddr() *p2p.NetAddress        { return nil }
@@ -1,12 +1,11 @@
package store
package blockchain

import (
    "fmt"
    "sync"

    "github.com/pkg/errors"

    dbm "github.com/tendermint/tm-db"
    cmn "github.com/tendermint/tendermint/libs/common"
    dbm "github.com/tendermint/tendermint/libs/db"

    "github.com/tendermint/tendermint/types"
)
@@ -68,7 +67,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block {
    if err != nil {
        // NOTE: The existence of meta should imply the existence of the
        // block. So, make sure meta is only saved after blocks are saved.
        panic(errors.Wrap(err, "Error reading block"))
        panic(cmn.ErrorWrap(err, "Error reading block"))
    }
    return block
}
@@ -84,7 +83,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part {
    }
    err := cdc.UnmarshalBinaryBare(bz, part)
    if err != nil {
        panic(errors.Wrap(err, "Error reading block part"))
        panic(cmn.ErrorWrap(err, "Error reading block part"))
    }
    return part
}
@@ -99,7 +98,7 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
    }
    err := cdc.UnmarshalBinaryBare(bz, blockMeta)
    if err != nil {
        panic(errors.Wrap(err, "Error reading block meta"))
        panic(cmn.ErrorWrap(err, "Error reading block meta"))
    }
    return blockMeta
}
@@ -116,7 +115,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit {
    }
    err := cdc.UnmarshalBinaryBare(bz, commit)
    if err != nil {
        panic(errors.Wrap(err, "Error reading block commit"))
        panic(cmn.ErrorWrap(err, "Error reading block commit"))
    }
    return commit
}
@@ -132,7 +131,7 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
    }
    err := cdc.UnmarshalBinaryBare(bz, commit)
    if err != nil {
        panic(errors.Wrap(err, "Error reading block seen commit"))
        panic(cmn.ErrorWrap(err, "Error reading block seen commit"))
    }
    return commit
}
@@ -145,14 +144,14 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit {
// most recent height. Otherwise they'd stall at H-1.
func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
    if block == nil {
        panic("BlockStore can only save a non-nil block")
        cmn.PanicSanity("BlockStore can only save a non-nil block")
    }
    height := block.Height
    if g, w := height, bs.Height()+1; g != w {
        panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
        cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g))
    }
    if !blockParts.IsComplete() {
        panic(fmt.Sprintf("BlockStore can only save complete block part sets"))
        cmn.PanicSanity(fmt.Sprintf("BlockStore can only save complete block part sets"))
    }

    // Save block meta
@@ -189,7 +188,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s

func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) {
    if height != bs.Height()+1 {
        panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
        cmn.PanicSanity(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height))
    }
    partBytes := cdc.MustMarshalBinaryBare(part)
    bs.db.Set(calcBlockPartKey(height, index), partBytes)
@@ -217,7 +216,6 @@ func calcSeenCommitKey(height int64) []byte {

var blockStoreKey = []byte("blockStore")

// BlockStoreStateJSON is the block store state JSON structure.
type BlockStoreStateJSON struct {
    Height int64 `json:"height"`
}
@@ -226,7 +224,7 @@ type BlockStoreStateJSON struct {
func (bsj BlockStoreStateJSON) Save(db dbm.DB) {
    bytes, err := cdc.MarshalJSON(bsj)
    if err != nil {
        panic(fmt.Sprintf("Could not marshal state bytes: %v", err))
        cmn.PanicSanity(fmt.Sprintf("Could not marshal state bytes: %v", err))
    }
    db.SetSync(blockStoreKey, bytes)
}
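Both sides of these hunks panic when stored bytes fail to decode, on the invariant that saved metadata implies decodable blocks. A small sketch of that load-or-panic pattern, using only the standard library (names are illustrative, not the store's real codec):

package main

import (
    "encoding/json"
    "fmt"
)

type blockMeta struct {
    Height int64 `json:"height"`
}

// loadMeta treats undecodable bytes as corruption: the store only writes
// meta after the block is saved, so a decode failure is unrecoverable.
func loadMeta(bz []byte) *blockMeta {
    if len(bz) == 0 {
        return nil // not found is a normal condition
    }
    meta := new(blockMeta)
    if err := json.Unmarshal(bz, meta); err != nil {
        panic(fmt.Errorf("error reading block meta: %w", err))
    }
    return meta
}

func main() {
    fmt.Println(loadMeta(nil))                      // <nil>
    fmt.Println(loadMeta([]byte(`{"height": 42}`))) // &{42}
}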
@@ -1,63 +1,21 @@
package store
package blockchain

import (
    "bytes"
    "fmt"
    "os"
    "runtime/debug"
    "strings"
    "testing"
    "time"

    "github.com/pkg/errors"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    db "github.com/tendermint/tm-db"
    dbm "github.com/tendermint/tm-db"

    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/libs/db"
    "github.com/tendermint/tendermint/libs/log"
    sm "github.com/tendermint/tendermint/state"

    "github.com/tendermint/tendermint/types"
    tmtime "github.com/tendermint/tendermint/types/time"
)

// A cleanupFunc cleans up any config / test files created for a particular
// test.
type cleanupFunc func()

// make a Commit with a single vote containing just the height and a timestamp
func makeTestCommit(height int64, timestamp time.Time) *types.Commit {
    commitSigs := []*types.CommitSig{{Height: height, Timestamp: timestamp}}
    return types.NewCommit(types.BlockID{}, commitSigs)
}

func makeTxs(height int64) (txs []types.Tx) {
    for i := 0; i < 10; i++ {
        txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
    }
    return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
    block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
    return block
}

func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) {
    config := cfg.ResetTestRoot("blockchain_reactor_test")
    // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB())
    // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB())
    blockDB := dbm.NewMemDB()
    stateDB := dbm.NewMemDB()
    state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile())
    if err != nil {
        panic(errors.Wrap(err, "error constructing state from genesis file"))
    }
    return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) }
}

func TestLoadBlockStoreStateJSON(t *testing.T) {
    db := db.NewMemDB()

@@ -84,7 +42,6 @@ func TestNewBlockStore(t *testing.T) {
    }

    for i, tt := range panicCausers {
        tt := tt
        // Expecting a panic here on trying to parse an invalid blockStore
        _, _, panicErr := doFn(func() (interface{}, error) {
            db.Set(blockStoreKey, tt.data)
@@ -106,32 +63,20 @@ func freshBlockStore() (*BlockStore, db.DB) {
}

var (
    state       sm.State
    block       *types.Block
    partSet     *types.PartSet
    part1       *types.Part
    part2       *types.Part
    seenCommit1 *types.Commit
)
    state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))

func TestMain(m *testing.M) {
    var cleanup cleanupFunc
    state, _, cleanup = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
    block = makeBlock(1, state, new(types.Commit))
    partSet = block.MakePartSet(2)
    part1 = partSet.GetPart(0)
    part2 = partSet.GetPart(1)
    seenCommit1 = makeTestCommit(10, tmtime.Now())
    code := m.Run()
    cleanup()
    os.Exit(code)
}
    block = makeBlock(1, state)
    partSet = block.MakePartSet(2)
    part1 = partSet.GetPart(0)
    part2 = partSet.GetPart(1)
    seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10,
        Timestamp: tmtime.Now()}}}
)

// TODO: This test should be simplified ...

func TestBlockStoreSaveLoadBlock(t *testing.T) {
    state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
    defer cleanup()
    state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
    require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")

    // check there are no blocks at various heights
@@ -143,9 +88,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
    }

    // save a block
    block := makeBlock(bs.Height()+1, state, new(types.Commit))
    block := makeBlock(bs.Height()+1, state)
    validPartSet := block.MakePartSet(2)
    seenCommit := makeTestCommit(10, tmtime.Now())
    seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
        Timestamp: tmtime.Now()}}}
    bs.SaveBlock(block, partSet, seenCommit)
    require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")

@@ -164,7 +110,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {

    // End of setup, test data

    commitAtH10 := makeTestCommit(10, tmtime.Now())
    commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
        Timestamp: tmtime.Now()}}}
    tuples := []struct {
        block *types.Block
        parts *types.PartSet
@@ -254,7 +201,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) {
    }

    for i, tuple := range tuples {
        tuple := tuple
        bs, db := freshBlockStore()
        // SaveBlock
        res, err, panicErr := doFn(func() (interface{}, error) {
@@ -348,7 +294,7 @@ func TestLoadBlockPart(t *testing.T) {
    gotPart, _, panicErr := doFn(loadPart)
    require.Nil(t, panicErr, "an existent and proper block should not panic")
    require.Nil(t, res, "a properly saved block should return a proper block")
    require.Equal(t, gotPart.(*types.Part), part1,
    require.Equal(t, gotPart.(*types.Part).Hash(), part1.Hash(),
        "expecting successful retrieval of previously saved block")
}

@@ -383,13 +329,14 @@ func TestLoadBlockMeta(t *testing.T) {
}

func TestBlockFetchAtHeight(t *testing.T) {
    state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
    defer cleanup()
    state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
    require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
    block := makeBlock(bs.Height()+1, state, new(types.Commit))
    block := makeBlock(bs.Height()+1, state)

    partSet := block.MakePartSet(2)
    seenCommit := makeTestCommit(10, tmtime.Now())
    seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
        Timestamp: tmtime.Now()}}}

    bs.SaveBlock(block, partSet, seenCommit)
    require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")

@@ -1,224 +0,0 @@
package v0

import (
    "fmt"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"

    cmn "github.com/tendermint/tendermint/libs/common"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/types"
)

func init() {
    peerTimeout = 2 * time.Second
}

type testPeer struct {
    id        p2p.ID
    height    int64
    inputChan chan inputData // make sure each peer's data is sequential
}

type inputData struct {
    t       *testing.T
    pool    *BlockPool
    request BlockRequest
}

func (p testPeer) runInputRoutine() {
    go func() {
        for input := range p.inputChan {
            p.simulateInput(input)
        }
    }()
}

// Request desired, pretend like we got the block immediately.
func (p testPeer) simulateInput(input inputData) {
    block := &types.Block{Header: types.Header{Height: input.request.Height}}
    input.pool.AddBlock(input.request.PeerID, block, 123)
    // TODO: uncommenting this creates a race which is detected by: https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
    // see: https://github.com/tendermint/tendermint/issues/3390#issue-418379890
    // input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}

type testPeers map[p2p.ID]testPeer

func (ps testPeers) start() {
    for _, v := range ps {
        v.runInputRoutine()
    }
}

func (ps testPeers) stop() {
    for _, v := range ps {
        close(v.inputChan)
    }
}

func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
    peers := make(testPeers, numPeers)
    for i := 0; i < numPeers; i++ {
        peerID := p2p.ID(cmn.RandStr(12))
        height := minHeight + cmn.RandInt63n(maxHeight-minHeight)
        peers[peerID] = testPeer{peerID, height, make(chan inputData, 10)}
    }
    return peers
}

func TestBlockPoolBasic(t *testing.T) {
    start := int64(42)
    peers := makePeers(10, start+1, 1000)
    errorsCh := make(chan peerError, 1000)
    requestsCh := make(chan BlockRequest, 1000)
    pool := NewBlockPool(start, requestsCh, errorsCh)
    pool.SetLogger(log.TestingLogger())

    err := pool.Start()
    if err != nil {
        t.Error(err)
    }

    defer pool.Stop()

    peers.start()
    defer peers.stop()

    // Introduce each peer.
    go func() {
        for _, peer := range peers {
            pool.SetPeerHeight(peer.id, peer.height)
        }
    }()

    // Start a goroutine to pull blocks
    go func() {
        for {
            if !pool.IsRunning() {
                return
            }
            first, second := pool.PeekTwoBlocks()
            if first != nil && second != nil {
                pool.PopRequest()
            } else {
                time.Sleep(1 * time.Second)
            }
        }
    }()

    // Pull from channels
    for {
        select {
        case err := <-errorsCh:
            t.Error(err)
        case request := <-requestsCh:
            t.Logf("Pulled new BlockRequest %v", request)
            if request.Height == 300 {
                return // Done!
            }

            peers[request.PeerID].inputChan <- inputData{t, pool, request}
        }
    }
}

func TestBlockPoolTimeout(t *testing.T) {
    start := int64(42)
    peers := makePeers(10, start+1, 1000)
    errorsCh := make(chan peerError, 1000)
    requestsCh := make(chan BlockRequest, 1000)
    pool := NewBlockPool(start, requestsCh, errorsCh)
    pool.SetLogger(log.TestingLogger())
    err := pool.Start()
    if err != nil {
        t.Error(err)
    }
    defer pool.Stop()

    for _, peer := range peers {
        t.Logf("Peer %v", peer.id)
    }

    // Introduce each peer.
    go func() {
        for _, peer := range peers {
            pool.SetPeerHeight(peer.id, peer.height)
        }
    }()

    // Start a goroutine to pull blocks
    go func() {
        for {
            if !pool.IsRunning() {
                return
            }
            first, second := pool.PeekTwoBlocks()
            if first != nil && second != nil {
                pool.PopRequest()
            } else {
                time.Sleep(1 * time.Second)
            }
        }
    }()

    // Pull from channels
    counter := 0
    timedOut := map[p2p.ID]struct{}{}
    for {
        select {
        case err := <-errorsCh:
            t.Log(err)
            // consider error to be always timeout here
            if _, ok := timedOut[err.peerID]; !ok {
                counter++
                if counter == len(peers) {
                    return // Done!
                }
            }
        case request := <-requestsCh:
            t.Logf("Pulled new BlockRequest %+v", request)
        }
    }
}

func TestBlockPoolRemovePeer(t *testing.T) {
    peers := make(testPeers, 10)
    for i := 0; i < 10; i++ {
        peerID := p2p.ID(fmt.Sprintf("%d", i+1))
        height := int64(i + 1)
        peers[peerID] = testPeer{peerID, height, make(chan inputData)}
    }
    requestsCh := make(chan BlockRequest)
    errorsCh := make(chan peerError)

    pool := NewBlockPool(1, requestsCh, errorsCh)
    pool.SetLogger(log.TestingLogger())
    err := pool.Start()
    require.NoError(t, err)
    defer pool.Stop()

    // add peers
    for peerID, peer := range peers {
        pool.SetPeerHeight(peerID, peer.height)
    }
    assert.EqualValues(t, 10, pool.MaxPeerHeight())

    // remove not-existing peer
    assert.NotPanics(t, func() { pool.RemovePeer(p2p.ID("Superman")) })

    // remove peer with biggest height
    pool.RemovePeer(p2p.ID("10"))
    assert.EqualValues(t, 9, pool.MaxPeerHeight())

    // remove all peers
    for peerID := range peers {
        pool.RemovePeer(peerID)
    }

    assert.EqualValues(t, 0, pool.MaxPeerHeight())
}
@@ -1,376 +0,0 @@
package v0

import (
    "os"
    "sort"
    "testing"
    "time"

    "github.com/pkg/errors"
    "github.com/tendermint/tendermint/store"

    "github.com/stretchr/testify/assert"

    abci "github.com/tendermint/tendermint/abci/types"
    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/mock"
    "github.com/tendermint/tendermint/p2p"
    "github.com/tendermint/tendermint/proxy"
    sm "github.com/tendermint/tendermint/state"
    "github.com/tendermint/tendermint/types"
    tmtime "github.com/tendermint/tendermint/types/time"
    dbm "github.com/tendermint/tm-db"
)

var config *cfg.Config

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
    validators := make([]types.GenesisValidator, numValidators)
    privValidators := make([]types.PrivValidator, numValidators)
    for i := 0; i < numValidators; i++ {
        val, privVal := types.RandValidator(randPower, minPower)
        validators[i] = types.GenesisValidator{
            PubKey: val.PubKey,
            Power:  val.VotingPower,
        }
        privValidators[i] = privVal
    }
    sort.Sort(types.PrivValidatorsByAddress(privValidators))

    return &types.GenesisDoc{
        GenesisTime: tmtime.Now(),
        ChainID:     config.ChainID(),
        Validators:  validators,
    }, privValidators
}

type BlockchainReactorPair struct {
    reactor *BlockchainReactor
    app     proxy.AppConns
}

func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) BlockchainReactorPair {
    if len(privVals) != 1 {
        panic("only support one validator")
    }

    app := &testApp{}
    cc := proxy.NewLocalClientCreator(app)
    proxyApp := proxy.NewAppConns(cc)
    err := proxyApp.Start()
    if err != nil {
        panic(errors.Wrap(err, "error start app"))
    }

    blockDB := dbm.NewMemDB()
    stateDB := dbm.NewMemDB()
    blockStore := store.NewBlockStore(blockDB)

    state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
    if err != nil {
        panic(errors.Wrap(err, "error constructing state from genesis file"))
    }

    // Make the BlockchainReactor itself.
    // NOTE we have to create and commit the blocks first because
    // pool.height is determined from the store.
    fastSync := true
    db := dbm.NewMemDB()
    blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(),
        mock.Mempool{}, sm.MockEvidencePool{})
    sm.SaveState(db, state)

    // let's add some blocks in
    for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
        lastCommit := types.NewCommit(types.BlockID{}, nil)
        if blockHeight > 1 {
            lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
            lastBlock := blockStore.LoadBlock(blockHeight - 1)

            vote, err := types.MakeVote(lastBlock.Header.Height, lastBlockMeta.BlockID, state.Validators, privVals[0], lastBlock.Header.ChainID)
            if err != nil {
                panic(err)
            }
            voteCommitSig := vote.CommitSig()
            lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{voteCommitSig})
        }

        thisBlock := makeBlock(blockHeight, state, lastCommit)

        thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
        blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()}

        state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
        if err != nil {
            panic(errors.Wrap(err, "error apply block"))
        }

        blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
    }

    bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
    bcReactor.SetLogger(logger.With("module", "blockchain"))

    return BlockchainReactorPair{bcReactor, proxyApp}
}

func TestNoBlockResponse(t *testing.T) {
    config = cfg.ResetTestRoot("blockchain_reactor_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(1, false, 30)

    maxBlockHeight := int64(65)

    reactorPairs := make([]BlockchainReactorPair, 2)

    reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
    reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)

    p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
        s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
        return s

    }, p2p.Connect2Switches)

    defer func() {
        for _, r := range reactorPairs {
            r.reactor.Stop()
            r.app.Stop()
        }
    }()

    tests := []struct {
        height   int64
        existent bool
    }{
        {maxBlockHeight + 2, false},
        {10, true},
        {1, true},
        {100, false},
    }

    for {
        if reactorPairs[1].reactor.pool.IsCaughtUp() {
            break
        }

        time.Sleep(10 * time.Millisecond)
    }

    assert.Equal(t, maxBlockHeight, reactorPairs[0].reactor.store.Height())

    for _, tt := range tests {
        block := reactorPairs[1].reactor.store.LoadBlock(tt.height)
        if tt.existent {
            assert.True(t, block != nil)
        } else {
            assert.True(t, block == nil)
        }
    }
}

// NOTE: This is too hard to test without
// an easy way to add test peer to switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestBadBlockStopsPeer(t *testing.T) {
    config = cfg.ResetTestRoot("blockchain_reactor_test")
    defer os.RemoveAll(config.RootDir)
    genDoc, privVals := randGenesisDoc(1, false, 30)

    maxBlockHeight := int64(148)

    otherChain := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
    defer func() {
        otherChain.reactor.Stop()
        otherChain.app.Stop()
    }()

    reactorPairs := make([]BlockchainReactorPair, 4)

    reactorPairs[0] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
    reactorPairs[1] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
    reactorPairs[2] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
    reactorPairs[3] = newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)

    switches := p2p.MakeConnectedSwitches(config.P2P, 4, func(i int, s *p2p.Switch) *p2p.Switch {
        s.AddReactor("BLOCKCHAIN", reactorPairs[i].reactor)
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)
|
||||
|
||||
defer func() {
|
||||
for _, r := range reactorPairs {
|
||||
r.reactor.Stop()
|
||||
r.app.Stop()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
if reactorPairs[3].reactor.pool.IsCaughtUp() {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
//at this time, reactors[0-3] is the newest
|
||||
assert.Equal(t, 3, reactorPairs[1].reactor.Switch.Peers().Size())
|
||||
|
||||
//mark reactorPairs[3] is an invalid peer
|
||||
reactorPairs[3].reactor.store = otherChain.reactor.store
|
||||
|
||||
lastReactorPair := newBlockchainReactor(log.TestingLogger(), genDoc, privVals, 0)
|
||||
reactorPairs = append(reactorPairs, lastReactorPair)
|
||||
|
||||
switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
|
||||
s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].reactor)
|
||||
return s
|
||||
|
||||
}, p2p.Connect2Switches)...)
|
||||
|
||||
for i := 0; i < len(reactorPairs)-1; i++ {
|
||||
p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
for {
|
||||
if lastReactorPair.reactor.pool.IsCaughtUp() || lastReactorPair.reactor.Switch.Peers().Size() == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
time.Sleep(1 * time.Second)
|
||||
}
|
||||
|
||||
assert.True(t, lastReactorPair.reactor.Switch.Peers().Size() < len(reactorPairs)-1)
|
||||
}
|
||||
|
||||
func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
requestHeight int64
|
||||
expectErr bool
|
||||
}{
|
||||
{"Valid Request Message", 0, false},
|
||||
{"Valid Request Message", 1, false},
|
||||
{"Invalid Request Message", -1, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
request := bcBlockRequestMessage{Height: tc.requestHeight}
|
||||
assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
nonResponseHeight int64
|
||||
expectErr bool
|
||||
}{
|
||||
{"Valid Non-Response Message", 0, false},
|
||||
{"Valid Non-Response Message", 1, false},
|
||||
{"Invalid Non-Response Message", -1, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight}
|
||||
assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBcStatusRequestMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
requestHeight int64
|
||||
expectErr bool
|
||||
}{
|
||||
{"Valid Request Message", 0, false},
|
||||
{"Valid Request Message", 1, false},
|
||||
{"Invalid Request Message", -1, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
request := bcStatusRequestMessage{Height: tc.requestHeight}
|
||||
assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
|
||||
testCases := []struct {
|
||||
testName string
|
||||
responseHeight int64
|
||||
expectErr bool
|
||||
}{
|
||||
{"Valid Response Message", 0, false},
|
||||
{"Valid Response Message", 1, false},
|
||||
{"Invalid Response Message", -1, true},
|
||||
}
|
||||
|
||||
for _, tc := range testCases {
|
||||
tc := tc
|
||||
t.Run(tc.testName, func(t *testing.T) {
|
||||
response := bcStatusResponseMessage{Height: tc.responseHeight}
|
||||
assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
//----------------------------------------------
|
||||
// utility funcs
|
||||
|
||||
func makeTxs(height int64) (txs []types.Tx) {
|
||||
for i := 0; i < 10; i++ {
|
||||
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
|
||||
}
|
||||
return txs
|
||||
}
|
||||
|
||||
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
|
||||
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
|
||||
return block
|
||||
}
|
||||
|
||||
type testApp struct {
|
||||
abci.BaseApplication
|
||||
}
|
||||
|
||||
var _ abci.Application = (*testApp)(nil)
|
||||
|
||||
func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
|
||||
return abci.ResponseInfo{}
|
||||
}
|
||||
|
||||
func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
|
||||
return abci.ResponseBeginBlock{}
|
||||
}
|
||||
|
||||
func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
|
||||
return abci.ResponseEndBlock{}
|
||||
}
|
||||
|
||||
func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
|
||||
return abci.ResponseDeliverTx{Events: []abci.Event{}}
|
||||
}
|
||||
|
||||
func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
|
||||
return abci.ResponseCheckTx{}
|
||||
}
|
||||
|
||||
func (app *testApp) Commit() abci.ResponseCommit {
|
||||
return abci.ResponseCommit{}
|
||||
}
|
||||
|
||||
func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
|
||||
return
|
||||
}
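
The testApp above uses a standard ABCI testing pattern: embedding abci.BaseApplication supplies no-op defaults for every ABCI method, so a test double only overrides the methods it cares about. A minimal sketch of the same pattern (the countingApp type and its field are illustrative, not part of this package):

type countingApp struct {
	abci.BaseApplication // supplies no-op defaults for all ABCI methods

	deliveredTxs int
}

// Override only the method the test needs to observe.
func (app *countingApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	app.deliveredTxs++
	return abci.ResponseDeliverTx{}
}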

@@ -1,13 +0,0 @@
package v1

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/types"
)

var cdc = amino.NewCodec()

func init() {
	RegisterBlockchainMessages(cdc)
	types.RegisterBlockAmino(cdc)
}
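
For context, the codec above is what the v1 reactor uses to (de)serialize its wire messages. A hedged sketch of typical go-amino round-tripping through this codec, assuming bcBlockRequestMessage and the BlockchainMessage interface are among the types registered by RegisterBlockchainMessages:

// Encode a concrete message...
bz, err := cdc.MarshalBinaryBare(&bcBlockRequestMessage{Height: 5})
if err != nil {
	panic(err)
}
// ...and decode it back through the registered interface.
var msg BlockchainMessage
if err := cdc.UnmarshalBinaryBare(bz, &msg); err != nil {
	panic(err)
}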

@@ -1,209 +0,0 @@
package v1

import (
	"fmt"
	"math"
	"time"

	flow "github.com/tendermint/tendermint/libs/flowrate"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

//--------
// Peer

// BpPeerParams stores the peer parameters that are used when creating a peer.
type BpPeerParams struct {
	timeout     time.Duration
	minRecvRate int64
	sampleRate  time.Duration
	windowSize  time.Duration
}

// BpPeer is the data structure associated with a fast sync peer.
type BpPeer struct {
	logger log.Logger
	ID     p2p.ID

	Height                  int64                  // the peer reported height
	NumPendingBlockRequests int                    // number of requests still waiting for block responses
	blocks                  map[int64]*types.Block // blocks received or expected to be received from this peer
	blockResponseTimer      *time.Timer
	recvMonitor             *flow.Monitor
	params                  *BpPeerParams // parameters for timer and monitor

	onErr func(err error, peerID p2p.ID) // function to call on error
}

// NewBpPeer creates a new peer.
func NewBpPeer(
	peerID p2p.ID, height int64, onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer {

	if params == nil {
		params = BpPeerDefaultParams()
	}
	return &BpPeer{
		ID:     peerID,
		Height: height,
		blocks: make(map[int64]*types.Block, maxRequestsPerPeer),
		logger: log.NewNopLogger(),
		onErr:  onErr,
		params: params,
	}
}

// String returns a string representation of a peer.
func (peer *BpPeer) String() string {
	return fmt.Sprintf("peer: %v height: %v pending: %v", peer.ID, peer.Height, peer.NumPendingBlockRequests)
}

// SetLogger sets the logger of the peer.
func (peer *BpPeer) SetLogger(l log.Logger) {
	peer.logger = l
}

// Cleanup performs cleanup of the peer: it removes blocks and requests, and stops the timer and monitor.
func (peer *BpPeer) Cleanup() {
	if peer.blockResponseTimer != nil {
		peer.blockResponseTimer.Stop()
	}
	if peer.NumPendingBlockRequests != 0 {
		peer.logger.Info("peer with pending requests is being cleaned", "peer", peer.ID)
	}
	if len(peer.blocks)-peer.NumPendingBlockRequests != 0 {
		peer.logger.Info("peer with pending blocks is being cleaned", "peer", peer.ID)
	}
	for h := range peer.blocks {
		delete(peer.blocks, h)
	}
	peer.NumPendingBlockRequests = 0
	peer.recvMonitor = nil
}

// BlockAtHeight returns the block at a given height if available and errMissingBlock otherwise.
func (peer *BpPeer) BlockAtHeight(height int64) (*types.Block, error) {
	block, ok := peer.blocks[height]
	if !ok {
		return nil, errMissingBlock
	}
	if block == nil {
		return nil, errMissingBlock
	}
	return peer.blocks[height], nil
}

// AddBlock adds a block at peer level. The block must be non-nil and recvSize non-negative.
// The peer must have a pending request for this block.
func (peer *BpPeer) AddBlock(block *types.Block, recvSize int) error {
	if block == nil || recvSize < 0 {
		panic("bad parameters")
	}
	existingBlock, ok := peer.blocks[block.Height]
	if !ok {
		peer.logger.Error("unsolicited block", "blockHeight", block.Height, "peer", peer.ID)
		return errMissingBlock
	}
	if existingBlock != nil {
		peer.logger.Error("already have a block for height", "height", block.Height)
		return errDuplicateBlock
	}
	if peer.NumPendingBlockRequests == 0 {
		panic("peer does not have pending requests")
	}
	peer.blocks[block.Height] = block
	peer.NumPendingBlockRequests--
	if peer.NumPendingBlockRequests == 0 {
		peer.stopMonitor()
		peer.stopBlockResponseTimer()
	} else {
		peer.recvMonitor.Update(recvSize)
		peer.resetBlockResponseTimer()
	}
	return nil
}

// RemoveBlock removes the block at the given height.
func (peer *BpPeer) RemoveBlock(height int64) {
	delete(peer.blocks, height)
}

// RequestSent records that a request was sent, and starts the peer timer and monitor if needed.
func (peer *BpPeer) RequestSent(height int64) {
	peer.blocks[height] = nil

	if peer.NumPendingBlockRequests == 0 {
		peer.startMonitor()
		peer.resetBlockResponseTimer()
	}
	peer.NumPendingBlockRequests++
}

// CheckRate verifies that the response rate of the peer is acceptable (higher than the minimum allowed).
func (peer *BpPeer) CheckRate() error {
	if peer.NumPendingBlockRequests == 0 {
		return nil
	}
	curRate := peer.recvMonitor.Status().CurRate
	// curRate can be 0 on start
	if curRate != 0 && curRate < peer.params.minRecvRate {
		err := errSlowPeer
		peer.logger.Error("SendTimeout", "peer", peer,
			"reason", err,
			"curRate", fmt.Sprintf("%d KB/s", curRate/1024),
			"minRate", fmt.Sprintf("%d KB/s", peer.params.minRecvRate/1024))
		return err
	}
	return nil
}

func (peer *BpPeer) onTimeout() {
	peer.onErr(errNoPeerResponse, peer.ID)
}

func (peer *BpPeer) stopMonitor() {
	peer.recvMonitor.Done()
	peer.recvMonitor = nil
}

func (peer *BpPeer) startMonitor() {
	peer.recvMonitor = flow.New(peer.params.sampleRate, peer.params.windowSize)
	initialValue := float64(peer.params.minRecvRate) * math.E
	peer.recvMonitor.SetREMA(initialValue)
}

func (peer *BpPeer) resetBlockResponseTimer() {
	if peer.blockResponseTimer == nil {
		peer.blockResponseTimer = time.AfterFunc(peer.params.timeout, peer.onTimeout)
	} else {
		peer.blockResponseTimer.Reset(peer.params.timeout)
	}
}

func (peer *BpPeer) stopBlockResponseTimer() bool {
	if peer.blockResponseTimer == nil {
		return false
	}
	return peer.blockResponseTimer.Stop()
}

// BpPeerDefaultParams returns the default peer parameters.
func BpPeerDefaultParams() *BpPeerParams {
	return &BpPeerParams{
		// Timeout for a peer to respond to a block request.
		timeout: 15 * time.Second,

		// Minimum recv rate to ensure we're receiving blocks from a peer fast
		// enough. If a peer is not sending data at least at that rate, we
		// consider them to have timed out and we disconnect.
		//
		// Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s,
		// sending data across the Atlantic ~ 7.5 KB/s.
		minRecvRate: int64(7680),

		// Monitor parameters
		sampleRate: time.Second,
		windowSize: 40 * time.Second,
	}
}
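
To make the defaults above concrete: 7.5 KB/s is 7.5 * 1024 = 7680 bytes/s, which is exactly the minRecvRate default, and startMonitor seeds the flow monitor's exponential moving average at e * minRecvRate so a freshly started peer is not immediately flagged as slow. A minimal lifecycle sketch of the type above (the peer ID, heights, sizes and the receivedBlock variable are illustrative assumptions, not from this file):

onErr := func(err error, id p2p.ID) {
	// e.g. report the error so the pool can drop the peer
}
peer := NewBpPeer(p2p.ID("peer-1"), 100, onErr, nil) // nil params => BpPeerDefaultParams()
peer.SetLogger(log.TestingLogger())

peer.RequestSent(42)                      // records the request; starts monitor and response timer
err := peer.AddBlock(receivedBlock, 1024) // records the response for height 42
_ = err                                   // errMissingBlock / errDuplicateBlock on bad responses
_ = peer.CheckRate()                      // non-nil only if the observed rate fell below minRecvRate
peer.Cleanup()                            // stops the timer/monitor and drops tracked blocks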

@@ -1,280 +0,0 @@
package v1

import (
	"sync"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

func TestPeerMonitor(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {},
		nil)
	peer.SetLogger(log.TestingLogger())
	peer.startMonitor()
	assert.NotNil(t, peer.recvMonitor)
	peer.stopMonitor()
	assert.Nil(t, peer.recvMonitor)
}

func TestPeerResetBlockResponseTimer(t *testing.T) {
	var (
		numErrFuncCalls int        // number of calls to the errFunc
		lastErr         error      // last generated error
		peerTestMtx     sync.Mutex // the variables above are also modified from the timer handler goroutine
	)
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {
			peerTestMtx.Lock()
			defer peerTestMtx.Unlock()
			lastErr = err
			numErrFuncCalls++
		},
		params)

	peer.SetLogger(log.TestingLogger())
	checkByStoppingPeerTimer(t, peer, false)

	// initial reset call with peer having a nil timer
	peer.resetBlockResponseTimer()
	assert.NotNil(t, peer.blockResponseTimer)
	// make sure timer is running and stop it
	checkByStoppingPeerTimer(t, peer, true)

	// reset with running timer
	peer.resetBlockResponseTimer()
	time.Sleep(time.Millisecond)
	peer.resetBlockResponseTimer()
	assert.NotNil(t, peer.blockResponseTimer)

	// let the timer expire and ...
	time.Sleep(3 * time.Millisecond)
	// ... check timer is not running
	checkByStoppingPeerTimer(t, peer, false)

	peerTestMtx.Lock()
	// ... check errNoPeerResponse has been sent
	assert.Equal(t, 1, numErrFuncCalls)
	assert.Equal(t, lastErr, errNoPeerResponse)
	peerTestMtx.Unlock()
}

func TestPeerRequestSent(t *testing.T) {
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {},
		params)

	peer.SetLogger(log.TestingLogger())

	peer.RequestSent(1)
	assert.NotNil(t, peer.recvMonitor)
	assert.NotNil(t, peer.blockResponseTimer)
	assert.Equal(t, 1, peer.NumPendingBlockRequests)

	peer.RequestSent(1)
	assert.NotNil(t, peer.recvMonitor)
	assert.NotNil(t, peer.blockResponseTimer)
	assert.Equal(t, 2, peer.NumPendingBlockRequests)
}

func TestPeerGetAndRemoveBlock(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 100,
		func(err error, _ p2p.ID) {},
		nil)

	// Change peer height
	peer.Height = int64(10)
	assert.Equal(t, int64(10), peer.Height)

	// request some blocks and receive a few of them
	for i := 1; i <= 10; i++ {
		peer.RequestSent(int64(i))
		if i > 5 {
			// only receive blocks 1..5
			continue
		}
		_ = peer.AddBlock(makeSmallBlock(i), 10)
	}

	tests := []struct {
		name         string
		height       int64
		wantErr      error
		blockPresent bool
	}{
		{"no request", 100, errMissingBlock, false},
		{"no block", 6, errMissingBlock, false},
		{"block 1 present", 1, nil, true},
		{"block max present", 5, nil, true},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// try to get the block
			b, err := peer.BlockAtHeight(tt.height)
			assert.Equal(t, tt.wantErr, err)
			assert.Equal(t, tt.blockPresent, b != nil)

			// remove the block
			peer.RemoveBlock(tt.height)
			_, err = peer.BlockAtHeight(tt.height)
			assert.Equal(t, errMissingBlock, err)
		})
	}
}

func TestPeerAddBlock(t *testing.T) {
	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 100,
		func(err error, _ p2p.ID) {},
		nil)

	// request some blocks, receive one
	for i := 1; i <= 10; i++ {
		peer.RequestSent(int64(i))
		if i == 5 {
			// receive block 5
			_ = peer.AddBlock(makeSmallBlock(i), 10)
		}
	}

	tests := []struct {
		name         string
		height       int64
		wantErr      error
		blockPresent bool
	}{
		{"no request", 50, errMissingBlock, false},
		{"duplicate block", 5, errDuplicateBlock, true},
		{"block 1 successfully received", 1, nil, true},
		{"block max successfully received", 10, nil, true},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// try to add the block
			err := peer.AddBlock(makeSmallBlock(int(tt.height)), 10)
			assert.Equal(t, tt.wantErr, err)
			_, err = peer.BlockAtHeight(tt.height)
			assert.Equal(t, tt.blockPresent, err == nil)
		})
	}
}

func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) {

	params := &BpPeerParams{timeout: 2 * time.Millisecond}
	var (
		numErrFuncCalls int        // number of calls to the onErr function
		lastErr         error      // last generated error
		peerTestMtx     sync.Mutex // the variables above are also modified from the timer handler goroutine
	)

	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {
			peerTestMtx.Lock()
			defer peerTestMtx.Unlock()
			lastErr = err
			numErrFuncCalls++
		},
		params)

	peer.SetLogger(log.TestingLogger())

	peer.RequestSent(1)
	time.Sleep(4 * time.Millisecond)
	// timer should have expired by now, check that the on error function was called
	peerTestMtx.Lock()
	assert.Equal(t, 1, numErrFuncCalls)
	assert.Equal(t, errNoPeerResponse, lastErr)
	peerTestMtx.Unlock()
}

func TestPeerCheckRate(t *testing.T) {
	params := &BpPeerParams{
		timeout:     time.Second,
		minRecvRate: int64(100), // 100 bytes/sec exponential moving average
	}
	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {},
		params)
	peer.SetLogger(log.TestingLogger())

	require.Nil(t, peer.CheckRate())

	for i := 0; i < 40; i++ {
		peer.RequestSent(int64(i))
	}

	// monitor starts with a higher rEMA (~ 2*minRecvRate), wait for it to go down
	time.Sleep(900 * time.Millisecond)

	// normal peer - send a bit more than 100 bytes/sec, > 10 bytes/100msec, check peer is not considered slow
	for i := 0; i < 10; i++ {
		_ = peer.AddBlock(makeSmallBlock(i), 11)
		time.Sleep(100 * time.Millisecond)
		require.Nil(t, peer.CheckRate())
	}

	// slow peer - send a bit less than 10 bytes/100msec
	for i := 10; i < 20; i++ {
		_ = peer.AddBlock(makeSmallBlock(i), 9)
		time.Sleep(100 * time.Millisecond)
	}
	// check peer is considered slow
	assert.Equal(t, errSlowPeer, peer.CheckRate())
}

func TestPeerCleanup(t *testing.T) {
	params := &BpPeerParams{timeout: 2 * time.Millisecond}

	peer := NewBpPeer(
		p2p.ID(cmn.RandStr(12)), 10,
		func(err error, _ p2p.ID) {},
		params)
	peer.SetLogger(log.TestingLogger())

	assert.Nil(t, peer.blockResponseTimer)
	peer.RequestSent(1)
	assert.NotNil(t, peer.blockResponseTimer)

	peer.Cleanup()
	checkByStoppingPeerTimer(t, peer, false)
}

// Check if the peer timer is running or not (a running timer can be successfully stopped).
// Note: stops the timer.
func checkByStoppingPeerTimer(t *testing.T, peer *BpPeer, running bool) {
	assert.NotPanics(t, func() {
		stopped := peer.stopBlockResponseTimer()
		if running {
			assert.True(t, stopped)
		} else {
			assert.False(t, stopped)
		}
	})
}

func makeSmallBlock(height int) *types.Block {
	return types.MakeBlock(int64(height), []types.Tx{types.Tx("foo")}, nil, nil)
}
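
checkByStoppingPeerTimer above relies on a documented property of the standard library: time.Timer.Stop reports true only when it actually prevented the timer from firing, so a true result proves the timer was still running. A standalone illustration (not from this file; assumes the fmt and time imports):

tm := time.AfterFunc(time.Hour, func() {})
fmt.Println(tm.Stop()) // true: the timer was running and is now stopped
fmt.Println(tm.Stop()) // false: it was already stopped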

@@ -1,369 +0,0 @@
package v1

import (
	"sort"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
	logger log.Logger
	// Set of peers that have sent status responses, with height greater than pool.Height
	peers map[p2p.ID]*BpPeer
	// Set of block heights and the corresponding peers from where a block response is expected or has been received.
	blocks map[int64]p2p.ID

	plannedRequests   map[int64]struct{} // list of blocks to be assigned peers for blockRequest
	nextRequestHeight int64              // next height to be added to plannedRequests

	Height        int64 // height of next block to execute
	MaxPeerHeight int64 // maximum height of all peers
	toBcR         bcReactor
}

// NewBlockPool creates a new BlockPool.
func NewBlockPool(height int64, toBcR bcReactor) *BlockPool {
	return &BlockPool{
		Height:            height,
		MaxPeerHeight:     0,
		peers:             make(map[p2p.ID]*BpPeer),
		blocks:            make(map[int64]p2p.ID),
		plannedRequests:   make(map[int64]struct{}),
		nextRequestHeight: height,
		toBcR:             toBcR,
	}
}

// SetLogger sets the logger of the pool.
func (pool *BlockPool) SetLogger(l log.Logger) {
	pool.logger = l
}

// ReachedMaxHeight checks if the pool has reached the maximum peer height.
func (pool *BlockPool) ReachedMaxHeight() bool {
	return pool.Height >= pool.MaxPeerHeight
}

func (pool *BlockPool) rescheduleRequest(peerID p2p.ID, height int64) {
	pool.logger.Info("reschedule requests made to peer for height", "peerID", peerID, "height", height)
	pool.plannedRequests[height] = struct{}{}
	delete(pool.blocks, height)
	pool.peers[peerID].RemoveBlock(height)
}

// Updates the pool's max height. If no peers are left, MaxPeerHeight is set to 0.
func (pool *BlockPool) updateMaxPeerHeight() {
	var newMax int64
	for _, peer := range pool.peers {
		peerHeight := peer.Height
		if peerHeight > newMax {
			newMax = peerHeight
		}
	}
	pool.MaxPeerHeight = newMax
}

// UpdatePeer adds a new peer or updates an existing peer with a new height.
// If the peer is too short it is not added.
func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error {

	peer := pool.peers[peerID]

	if peer == nil {
		if height < pool.Height {
			pool.logger.Info("Peer height too small",
				"peer", peerID, "height", height, "fsm_height", pool.Height)
			return errPeerTooShort
		}
		// Add new peer.
		peer = NewBpPeer(peerID, height, pool.toBcR.sendPeerError, nil)
		peer.SetLogger(pool.logger.With("peer", peerID))
		pool.peers[peerID] = peer
		pool.logger.Info("added peer", "peerID", peerID, "height", height, "num_peers", len(pool.peers))
	} else {
		// Check if peer is lowering its height. This is not allowed.
		if height < peer.Height {
			pool.RemovePeer(peerID, errPeerLowersItsHeight)
			return errPeerLowersItsHeight
		}
		// Update existing peer.
		peer.Height = height
	}

	// Update the pool's MaxPeerHeight if needed.
	pool.updateMaxPeerHeight()

	return nil
}

// Cleans and deletes the peer. Recomputes the max peer height.
func (pool *BlockPool) deletePeer(peer *BpPeer) {
	if peer == nil {
		return
	}
	peer.Cleanup()
	delete(pool.peers, peer.ID)

	if peer.Height == pool.MaxPeerHeight {
		pool.updateMaxPeerHeight()
	}
}

// RemovePeer removes the blocks and requests from the peer, reschedules them and deletes the peer.
func (pool *BlockPool) RemovePeer(peerID p2p.ID, err error) {
	peer := pool.peers[peerID]
	if peer == nil {
		return
	}
	pool.logger.Info("removing peer", "peerID", peerID, "error", err)

	// Reschedule the block requests made to the peer, or received and not processed yet.
	// Note that some of the requests may be removed further down.
	for h := range pool.peers[peerID].blocks {
		pool.rescheduleRequest(peerID, h)
	}

	oldMaxPeerHeight := pool.MaxPeerHeight
	// Delete the peer. This operation may result in the pool's MaxPeerHeight being lowered.
	pool.deletePeer(peer)

	// Check if the pool's MaxPeerHeight has been lowered.
	// This may happen if the tallest peer has been removed.
	if oldMaxPeerHeight > pool.MaxPeerHeight {
		// Remove any planned requests for heights over the new MaxPeerHeight.
		for h := range pool.plannedRequests {
			if h > pool.MaxPeerHeight {
				delete(pool.plannedRequests, h)
			}
		}
		// Adjust the nextRequestHeight to the new max plus one.
		if pool.nextRequestHeight > pool.MaxPeerHeight {
			pool.nextRequestHeight = pool.MaxPeerHeight + 1
		}
	}
}

func (pool *BlockPool) removeShortPeers() {
	for _, peer := range pool.peers {
		if peer.Height < pool.Height {
			pool.RemovePeer(peer.ID, nil)
		}
	}
}

func (pool *BlockPool) removeBadPeers() {
	pool.removeShortPeers()
	for _, peer := range pool.peers {
		if err := peer.CheckRate(); err != nil {
			pool.RemovePeer(peer.ID, err)
			pool.toBcR.sendPeerError(err, peer.ID)
		}
	}
}

// MakeNextRequests creates more requests if the block pool is running low.
func (pool *BlockPool) MakeNextRequests(maxNumRequests int) {
	heights := pool.makeRequestBatch(maxNumRequests)
	if len(heights) != 0 {
		pool.logger.Info("makeNextRequests will make following requests",
			"number", len(heights), "heights", heights)
	}

	for _, height := range heights {
		h := int64(height)
		if !pool.sendRequest(h) {
			// If a good peer was not found for sending the request at height h then return,
			// as it shouldn't be possible to find a peer for h+1.
			return
		}
		delete(pool.plannedRequests, h)
	}
}

// Makes a batch of requests sorted by height such that the block pool has up to maxNumRequests entries.
func (pool *BlockPool) makeRequestBatch(maxNumRequests int) []int {
	pool.removeBadPeers()
	// At this point pool.requests may include heights for requests to be redone due to removal of peers:
	// - peers timed out or were removed by switch
	// - FSM timed out on waiting to advance the block execution due to missing blocks at h or h+1
	// Determine the number of requests needed by subtracting the number of requests already made from the maximum
	// allowed.
	numNeeded := maxNumRequests - len(pool.blocks)
	for len(pool.plannedRequests) < numNeeded {
		if pool.nextRequestHeight > pool.MaxPeerHeight {
			break
		}
		pool.plannedRequests[pool.nextRequestHeight] = struct{}{}
		pool.nextRequestHeight++
	}

	heights := make([]int, 0, len(pool.plannedRequests))
	for k := range pool.plannedRequests {
		heights = append(heights, int(k))
	}
	sort.Ints(heights)
	return heights
}

func (pool *BlockPool) sendRequest(height int64) bool {
	for _, peer := range pool.peers {
		if peer.NumPendingBlockRequests >= maxRequestsPerPeer {
			continue
		}
		if peer.Height < height {
			continue
		}

		err := pool.toBcR.sendBlockRequest(peer.ID, height)
		if err == errNilPeerForBlockRequest {
			// The switch does not have this peer; remove it and continue to look for another peer.
			pool.logger.Error("switch does not have peer, removing peer selected for height", "peer",
				peer.ID, "height", height)
			pool.RemovePeer(peer.ID, err)
			continue
		}

		if err == errSendQueueFull {
			pool.logger.Error("peer queue is full", "peer", peer.ID, "height", height)
			continue
		}

		pool.logger.Info("assigned request to peer", "peer", peer.ID, "height", height)

		pool.blocks[height] = peer.ID
		peer.RequestSent(height)

		return true
	}
	pool.logger.Error("could not find peer to send request for block at height", "height", height)
	return false
}

// AddBlock validates that the block comes from the peer it was expected from and stores it in the 'blocks' map.
func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) error {
	peer, ok := pool.peers[peerID]
	if !ok {
		pool.logger.Error("block from unknown peer", "height", block.Height, "peer", peerID)
		return errBadDataFromPeer
	}
	if wantPeerID, ok := pool.blocks[block.Height]; ok && wantPeerID != peerID {
		pool.logger.Error("block received from wrong peer", "height", block.Height,
			"peer", peerID, "expected_peer", wantPeerID)
		return errBadDataFromPeer
	}

	return peer.AddBlock(block, blockSize)
}

// BlockData stores the peer responsible to deliver a block and the actual block if delivered.
type BlockData struct {
	block *types.Block
	peer  *BpPeer
}

// BlockAndPeerAtHeight retrieves the block and delivery peer at the specified height.
// Returns errMissingBlock if a block was not found.
func (pool *BlockPool) BlockAndPeerAtHeight(height int64) (bData *BlockData, err error) {
	peerID := pool.blocks[height]
	peer := pool.peers[peerID]
	if peer == nil {
		return nil, errMissingBlock
	}

	block, err := peer.BlockAtHeight(height)
	if err != nil {
		return nil, err
	}

	return &BlockData{peer: peer, block: block}, nil
}

// FirstTwoBlocksAndPeers returns the blocks and the delivery peers at pool's height H and H+1.
func (pool *BlockPool) FirstTwoBlocksAndPeers() (first, second *BlockData, err error) {
	first, err = pool.BlockAndPeerAtHeight(pool.Height)
	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)
	if err == nil {
		err = err2
	}
	return
}

// InvalidateFirstTwoBlocks removes the peers that sent us the first two blocks; the blocks are removed by RemovePeer().
func (pool *BlockPool) InvalidateFirstTwoBlocks(err error) {
	first, err1 := pool.BlockAndPeerAtHeight(pool.Height)
	second, err2 := pool.BlockAndPeerAtHeight(pool.Height + 1)

	if err1 == nil {
		pool.RemovePeer(first.peer.ID, err)
	}
	if err2 == nil {
		pool.RemovePeer(second.peer.ID, err)
	}
}

// ProcessedCurrentHeightBlock performs cleanup after a block is processed. It removes the block at the pool height and
// the peers that are now short.
func (pool *BlockPool) ProcessedCurrentHeightBlock() {
	peerID, peerOk := pool.blocks[pool.Height]
	if peerOk {
		pool.peers[peerID].RemoveBlock(pool.Height)
	}
	delete(pool.blocks, pool.Height)
	pool.logger.Debug("removed block at height", "height", pool.Height)
	pool.Height++
	pool.removeShortPeers()
}

// RemovePeerAtCurrentHeights checks if a block at the pool's height H exists and, if not, removes the
// delivery peer and returns. If a block at height H exists then the check and peer removal is done for H+1.
// This function is called when the FSM is not able to make progress for some time.
// This happens if either the block at H or H+1 has not been delivered.
func (pool *BlockPool) RemovePeerAtCurrentHeights(err error) {
	peerID := pool.blocks[pool.Height]
	peer, ok := pool.peers[peerID]
	if ok {
		if _, err := peer.BlockAtHeight(pool.Height); err != nil {
			pool.logger.Info("remove peer that hasn't sent block at pool.Height",
				"peer", peerID, "height", pool.Height)
			pool.RemovePeer(peerID, err)
			return
		}
	}
	peerID = pool.blocks[pool.Height+1]
	peer, ok = pool.peers[peerID]
	if ok {
		if _, err := peer.BlockAtHeight(pool.Height + 1); err != nil {
			pool.logger.Info("remove peer that hasn't sent block at pool.Height+1",
				"peer", peerID, "height", pool.Height+1)
			pool.RemovePeer(peerID, err)
			return
		}
	}
}

// Cleanup performs pool and peer cleanup.
func (pool *BlockPool) Cleanup() {
	for id, peer := range pool.peers {
		peer.Cleanup()
		delete(pool.peers, id)
	}
	pool.plannedRequests = make(map[int64]struct{})
	pool.blocks = make(map[int64]p2p.ID)
	pool.nextRequestHeight = 0
	pool.Height = 0
	pool.MaxPeerHeight = 0
}

// NumPeers returns the number of peers in the pool.
func (pool *BlockPool) NumPeers() int {
	return len(pool.peers)
}

// NeedsBlocks returns true if more blocks are required.
func (pool *BlockPool) NeedsBlocks() bool {
	return len(pool.blocks) < maxNumRequests
}
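
Taken together, the methods above form the pool's request/response cycle. A hedged driving-loop sketch, assuming toBcR is some value implementing this package's bcReactor interface and 64 stands in for the maxNumRequests constant:

pool := NewBlockPool(1, toBcR)
pool.SetLogger(log.TestingLogger())

_ = pool.UpdatePeer(p2p.ID("P1"), 100) // peer P1 reports height 100
pool.MakeNextRequests(64)              // schedule and send block requests

// On each block response delivered by the reactor:
// _ = pool.AddBlock(peerID, block, blockSize)

// Once blocks at heights H and H+1 are present, they can be verified and executed:
if _, _, err := pool.FirstTwoBlocksAndPeers(); err == nil {
	pool.ProcessedCurrentHeightBlock() // advances pool.Height to H+1
}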
|
||||
@@ -1,659 +0,0 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/p2p"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type testPeer struct {
|
||||
id p2p.ID
|
||||
height int64
|
||||
}
|
||||
|
||||
type testBcR struct {
|
||||
logger log.Logger
|
||||
}
|
||||
|
||||
type testValues struct {
|
||||
numRequestsSent int
|
||||
}
|
||||
|
||||
var testResults testValues
|
||||
|
||||
func resetPoolTestResults() {
|
||||
testResults.numRequestsSent = 0
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendPeerError(err error, peerID p2p.ID) {
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendStatusRequest() {
|
||||
}
|
||||
|
||||
func (testR *testBcR) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
testResults.numRequestsSent++
|
||||
return nil
|
||||
}
|
||||
|
||||
func (testR *testBcR) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
|
||||
}
|
||||
|
||||
func (testR *testBcR) switchToConsensus() {
|
||||
|
||||
}
|
||||
|
||||
func newTestBcR() *testBcR {
|
||||
testBcR := &testBcR{logger: log.TestingLogger()}
|
||||
return testBcR
|
||||
}
|
||||
|
||||
type tPBlocks struct {
|
||||
id p2p.ID
|
||||
create bool
|
||||
}
|
||||
|
||||
// Makes a block pool with specified current height, list of peers, block requests and block responses
|
||||
func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64]tPBlocks) *BlockPool {
|
||||
bPool := NewBlockPool(height, bcr)
|
||||
bPool.SetLogger(bcr.logger)
|
||||
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
var maxH int64
|
||||
for _, p := range peers {
|
||||
if p.Height > maxH {
|
||||
maxH = p.Height
|
||||
}
|
||||
bPool.peers[p.ID] = NewBpPeer(p.ID, p.Height, bcr.sendPeerError, nil)
|
||||
bPool.peers[p.ID].SetLogger(bcr.logger)
|
||||
|
||||
}
|
||||
bPool.MaxPeerHeight = maxH
|
||||
for h, p := range blocks {
|
||||
bPool.blocks[h] = p.id
|
||||
bPool.peers[p.id].RequestSent(h)
|
||||
if p.create {
|
||||
// simulate that a block at height h has been received
|
||||
_ = bPool.peers[p.id].AddBlock(types.MakeBlock(h, txs, nil, nil), 100)
|
||||
}
|
||||
}
|
||||
return bPool
|
||||
}
|
||||
|
||||
func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2p.ID]*BpPeer) {
|
||||
assert.Equal(t, len(set1), len(set2))
|
||||
for peerID, peer1 := range set1 {
|
||||
peer2 := set2[peerID]
|
||||
assert.NotNil(t, peer2)
|
||||
assert.Equal(t, peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests)
|
||||
assert.Equal(t, peer1.Height, peer2.Height)
|
||||
assert.Equal(t, len(peer1.blocks), len(peer2.blocks))
|
||||
for h, block1 := range peer1.blocks {
|
||||
block2 := peer2.blocks[h]
|
||||
// block1 and block2 could be nil if a request was made but no block was received
|
||||
assert.Equal(t, block1, block2)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func assertBlockPoolEquivalent(t *testing.T, poolWanted, pool *BlockPool) {
|
||||
assert.Equal(t, poolWanted.blocks, pool.blocks)
|
||||
assertPeerSetsEquivalent(t, poolWanted.peers, pool.peers)
|
||||
assert.Equal(t, poolWanted.MaxPeerHeight, pool.MaxPeerHeight)
|
||||
assert.Equal(t, poolWanted.Height, pool.Height)
|
||||
|
||||
}
|
||||
|
||||
func TestBlockPoolUpdatePeer(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args testPeer
|
||||
poolWanted *BlockPool
|
||||
errWanted error
|
||||
}{
|
||||
{
|
||||
name: "add a first short peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 50},
|
||||
errWanted: errPeerTooShort,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "add a first good peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 101},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "increase the height of P1 from 120 to 123",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 123},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "decrease the height of P1 from 120 to 110",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: testPeer{"P1", 110},
|
||||
errWanted: errPeerLowersItsHeight,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "decrease the height of P1 from 105 to 102 with blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}},
|
||||
map[int64]tPBlocks{
|
||||
100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}),
|
||||
args: testPeer{"P1", 102},
|
||||
errWanted: errPeerLowersItsHeight,
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{},
|
||||
map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
err := pool.UpdatePeer(tt.args.id, tt.args.height)
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks)
|
||||
assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers)
|
||||
assert.Equal(t, tt.poolWanted.MaxPeerHeight, tt.pool.MaxPeerHeight)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemovePeer(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
err error
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args args
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "attempt to delete non-existing peer",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P99", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer without blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers without blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers without blocks",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 120}}, map[int64]tPBlocks{}),
|
||||
args: args{"P2", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the only peer with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the shortest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 200}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 200}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
{
|
||||
name: "delete the tallest of two peers with block requests sent and blocks received",
|
||||
pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 110}},
|
||||
map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
|
||||
args: args{"P1", nil},
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P2", Height: 110}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
tt.pool.RemovePeer(tt.args.peerID, tt.args.err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolRemoveShortPeers(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
poolWanted *BlockPool
|
||||
}{
|
||||
{
|
||||
name: "no short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 110}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "one short peer",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 90}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P3", Height: 120}}, map[int64]tPBlocks{}),
|
||||
},
|
||||
|
||||
{
|
||||
name: "all short peers",
|
||||
pool: makeBlockPool(testBcR, 100,
|
||||
[]BpPeer{{ID: "P1", Height: 90}, {ID: "P2", Height: 91}, {ID: "P3", Height: 92}}, map[int64]tPBlocks{}),
|
||||
poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
pool := tt.pool
|
||||
pool.removeShortPeers()
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolSendRequestBatch(t *testing.T) {
|
||||
type testPeerResult struct {
|
||||
id p2p.ID
|
||||
numPendingBlockRequests int
|
||||
}
|
||||
|
||||
testBcR := newTestBcR()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
maxRequestsPerPeer int
|
||||
expRequests map[int64]bool
|
||||
expPeerResults []testPeerResult
|
||||
expnumPendingBlockRequests int
|
||||
}{
|
||||
{
|
||||
name: "one peer - send up to maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}},
|
||||
expnumPendingBlockRequests: 2,
|
||||
},
|
||||
{
|
||||
name: "n peers - send n*maxRequestsPerPeer block requests",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}}, map[int64]tPBlocks{}),
|
||||
maxRequestsPerPeer: 2,
|
||||
expRequests: map[int64]bool{10: true, 11: true},
|
||||
expPeerResults: []testPeerResult{
|
||||
{id: "P1", numPendingBlockRequests: 2},
|
||||
{id: "P2", numPendingBlockRequests: 2}},
|
||||
expnumPendingBlockRequests: 4,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
resetPoolTestResults()
|
||||
|
||||
var pool = tt.pool
|
||||
maxRequestsPerPeer = tt.maxRequestsPerPeer
|
||||
pool.MakeNextRequests(10)
|
||||
assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers))
|
||||
|
||||
for _, tPeer := range tt.expPeerResults {
|
||||
var peer = pool.peers[tPeer.id]
|
||||
assert.NotNil(t, peer)
|
||||
assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests)
|
||||
}
|
||||
assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers))
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolAddBlock(t *testing.T) {
|
||||
testBcR := newTestBcR()
|
||||
txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
|
||||
|
||||
type args struct {
|
||||
peerID p2p.ID
|
||||
block *types.Block
|
||||
blockSize int
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
pool *BlockPool
|
||||
args args
|
||||
poolWanted *BlockPool
|
||||
errWanted error
|
||||
}{
|
||||
{name: "block from unknown peer",
|
||||
pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
args: args{
|
||||
peerID: "P2",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}),
|
||||
errWanted: errBadDataFromPeer,
|
||||
},
|
||||
{name: "unexpected block 11 from known peer - waiting for 10",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(11), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
errWanted: errMissingBlock,
|
||||
},
|
||||
{name: "unexpected block 10 from known peer - already have 10",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}, 11: {"P1", false}}),
|
||||
errWanted: errDuplicateBlock,
|
||||
},
|
||||
{name: "unexpected block 10 from known peer P2 - expected 10 to come from P1",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P2",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
errWanted: errBadDataFromPeer,
|
||||
},
|
||||
{name: "expected block from known peer",
|
||||
pool: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", false}}),
|
||||
args: args{
|
||||
peerID: "P1",
|
||||
block: types.MakeBlock(int64(10), txs, nil, nil),
|
||||
blockSize: 100,
|
||||
},
|
||||
poolWanted: makeBlockPool(testBcR, 10,
|
||||
[]BpPeer{{ID: "P1", Height: 100}},
|
||||
map[int64]tPBlocks{10: {"P1", true}}),
|
||||
errWanted: nil,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
err := tt.pool.AddBlock(tt.args.peerID, tt.args.block, tt.args.blockSize)
|
||||
assert.Equal(t, tt.errWanted, err)
|
||||
assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestBlockPoolFirstTwoBlocksAndPeers(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name         string
		pool         *BlockPool
		firstWanted  int64
		secondWanted int64
		errWanted    error
	}{
		{
			name: "both blocks missing",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
			errWanted: errMissingBlock,
		},
		{
			name: "second block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
			firstWanted: 15,
			errWanted:   errMissingBlock,
		},
		{
			name: "first block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{16: {"P2", true}, 18: {"P2", true}}),
			secondWanted: 16,
			errWanted:    errMissingBlock,
		},
		{
			name: "both blocks present",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
			firstWanted:  10,
			secondWanted: 11,
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			pool := tt.pool
			gotFirst, gotSecond, err := pool.FirstTwoBlocksAndPeers()
			assert.Equal(t, tt.errWanted, err)

			if tt.firstWanted != 0 {
				peer := pool.blocks[tt.firstWanted]
				block := pool.peers[peer].blocks[tt.firstWanted]
				assert.Equal(t, block, gotFirst.block,
					"BlockPool.FirstTwoBlocksAndPeers() gotFirst = %v, want %v",
					gotFirst.block.Height, tt.firstWanted)
			}

			if tt.secondWanted != 0 {
				peer := pool.blocks[tt.secondWanted]
				block := pool.peers[peer].blocks[tt.secondWanted]
				assert.Equal(t, block, gotSecond.block,
					"BlockPool.FirstTwoBlocksAndPeers() gotSecond = %v, want %v",
					gotSecond.block.Height, tt.secondWanted)
			}
		})
	}
}

func TestBlockPoolInvalidateFirstTwoBlocks(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "both blocks missing",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 16: {"P2", true}}),
		},
		{
			name: "second block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{15: {"P1", true}, 18: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P2", Height: 100}},
				map[int64]tPBlocks{18: {"P2", true}}),
		},
		{
			name: "first block missing",
			pool: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{18: {"P1", true}, 16: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 15,
				[]BpPeer{{ID: "P1", Height: 100}},
				map[int64]tPBlocks{18: {"P1", true}}),
		},
		{
			name: "both blocks present",
			pool: makeBlockPool(testBcR, 10,
				[]BpPeer{{ID: "P1", Height: 100}, {ID: "P2", Height: 100}},
				map[int64]tPBlocks{10: {"P1", true}, 11: {"P2", true}}),
			poolWanted: makeBlockPool(testBcR, 10,
				[]BpPeer{},
				map[int64]tPBlocks{}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.InvalidateFirstTwoBlocks(errNoPeerResponse)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestProcessedCurrentHeightBlock(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "one peer",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", true}}),
			poolWanted: makeBlockPool(testBcR, 101, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{101: {"P1", true}}),
		},
		{
			name: "multiple peers",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 101,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.ProcessedCurrentHeightBlock()
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

func TestRemovePeerAtCurrentHeight(t *testing.T) {
	testBcR := newTestBcR()

	tests := []struct {
		name       string
		pool       *BlockPool
		poolWanted *BlockPool
	}{
		{
			name: "one peer, remove peer for block at H",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", false}, 101: {"P1", true}}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "one peer, remove peer for block at H+1",
			pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}},
				map[int64]tPBlocks{100: {"P1", true}, 101: {"P1", false}}),
			poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}),
		},
		{
			name: "multiple peers, remove peer for block at H",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", false}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					101: {"P2", true}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
		{
			name: "multiple peers, remove peer for block at H+1",
			pool: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P2", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					101: {"P2", false}, 103: {"P2", false},
					102: {"P3", true}, 106: {"P3", true}}),
			poolWanted: makeBlockPool(testBcR, 100,
				[]BpPeer{{ID: "P1", Height: 120}, {ID: "P3", Height: 130}},
				map[int64]tPBlocks{
					100: {"P1", true}, 104: {"P1", true}, 105: {"P1", false},
					102: {"P3", true}, 106: {"P3", true}}),
		},
	}

	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			tt.pool.RemovePeerAtCurrentHeights(errNoPeerResponse)
			assertBlockPoolEquivalent(t, tt.poolWanted, tt.pool)
		})
	}
}

@@ -1,622 +0,0 @@
package v1

import (
	"errors"
	"fmt"
	"reflect"
	"time"

	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/behaviour"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
)

const (
	// BlockchainChannel is a channel for blocks and status updates (`BlockStore` height)
	BlockchainChannel = byte(0x40)
	trySyncIntervalMS = 10
	trySendIntervalMS = 10

	// ask for best height every 10s
	statusUpdateIntervalSeconds = 10

	// NOTE: keep up to date with bcBlockResponseMessage
	bcBlockResponseMessagePrefixSize   = 4
	bcBlockResponseMessageFieldKeySize = 1
	maxMsgSize                         = types.MaxBlockSizeBytes +
		bcBlockResponseMessagePrefixSize +
		bcBlockResponseMessageFieldKeySize
)

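// A worked example of the constant above, assuming types.MaxBlockSizeBytes is
// 104857600 (100 MB) as in this release: maxMsgSize = 104857600 + 4 + 1 =
// 104857605 bytes, i.e. the largest possible block plus the amino prefix and
// field-key overhead of the enclosing bcBlockResponseMessage.
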
var (
	// Maximum number of requests that can be pending per peer, i.e. for which requests have been sent but blocks
	// have not been received.
	maxRequestsPerPeer = 20
	// Maximum number of block requests for the reactor, pending or for which blocks have been received.
	maxNumRequests = 64
)

type consensusReactor interface {
	// for when we switch from blockchain reactor and fast sync to
	// the consensus machine
	SwitchToConsensus(sm.State, int)
}

// BlockchainReactor handles long-term catchup syncing.
type BlockchainReactor struct {
	p2p.BaseReactor

	initialState sm.State // immutable
	state        sm.State

	blockExec *sm.BlockExecutor
	store     *store.BlockStore

	fastSync bool

	fsm          *BcReactorFSM
	blocksSynced int

	// Receive goroutine forwards messages to this channel to be processed in the context of the poolRoutine.
	messagesForFSMCh chan bcReactorMessage

	// Switch goroutine may send RemovePeer to the blockchain reactor. This is an error message that is relayed
	// to this channel to be processed in the context of the poolRoutine.
	errorsForFSMCh chan bcReactorMessage

	// This channel is used by the FSM and indirectly the block pool to report errors to the blockchain reactor and
	// the switch.
	eventsFromFSMCh chan bcFsmMessage

	swReporter *behaviour.SwitchReporter
}

// NewBlockchainReactor returns a new reactor instance.
func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *store.BlockStore,
	fastSync bool) *BlockchainReactor {

	if state.LastBlockHeight != store.Height() {
		panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight,
			store.Height()))
	}

	const capacity = 1000
	eventsFromFSMCh := make(chan bcFsmMessage, capacity)
	messagesForFSMCh := make(chan bcReactorMessage, capacity)
	errorsForFSMCh := make(chan bcReactorMessage, capacity)

	startHeight := store.Height() + 1
	bcR := &BlockchainReactor{
		initialState:     state,
		state:            state,
		blockExec:        blockExec,
		fastSync:         fastSync,
		store:            store,
		messagesForFSMCh: messagesForFSMCh,
		eventsFromFSMCh:  eventsFromFSMCh,
		errorsForFSMCh:   errorsForFSMCh,
	}
	fsm := NewFSM(startHeight, bcR)
	bcR.fsm = fsm
	bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR)
	// The switch reporter is created in OnStart, once the Switch has been set:
	// bcR.swReporter = behaviour.NewSwitcReporter(bcR.BaseReactor.Switch)

	return bcR
}

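// Construction sketch (hypothetical wiring; in practice the node setup code
// supplies the state, block executor, block store and logger):
//
//	bcR := NewBlockchainReactor(state, blockExec, blockStore, true /* fastSync */)
//	bcR.SetLogger(logger.With("module", "blockchain"))
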
// bcReactorMessage is used by the reactor to send messages to the FSM.
type bcReactorMessage struct {
	event bReactorEvent
	data  bReactorEventData
}

type bFsmEvent uint

const (
	// message type events
	peerErrorEv = iota + 1
	syncFinishedEv
)

type bFsmEventData struct {
	peerID p2p.ID
	err    error
}

// bcFsmMessage is used by the FSM to send messages to the reactor.
type bcFsmMessage struct {
	event bFsmEvent
	data  bFsmEventData
}

// SetLogger implements cmn.Service by setting the logger on reactor and pool.
func (bcR *BlockchainReactor) SetLogger(l log.Logger) {
	bcR.BaseService.Logger = l
	bcR.fsm.SetLogger(l)
}

// OnStart implements cmn.Service.
func (bcR *BlockchainReactor) OnStart() error {
	bcR.swReporter = behaviour.NewSwitcReporter(bcR.BaseReactor.Switch)
	if bcR.fastSync {
		go bcR.poolRoutine()
	}
	return nil
}

// OnStop implements cmn.Service.
func (bcR *BlockchainReactor) OnStop() {
	_ = bcR.Stop()
}

// GetChannels implements Reactor.
func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
	return []*p2p.ChannelDescriptor{
		{
			ID:                  BlockchainChannel,
			Priority:            10,
			SendQueueCapacity:   2000,
			RecvBufferCapacity:  50 * 4096,
			RecvMessageCapacity: maxMsgSize,
		},
	}
}

// AddPeer implements Reactor by sending our state to the peer.
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
	peer.Send(BlockchainChannel, msgBytes)
	// It's OK if the send fails: we will try again later in poolRoutine.

	// The peer is added to the pool once we receive the first
	// bcStatusResponseMessage from it and call pool.updatePeer().
}

// sendBlockToPeer loads a block and sends it to the requesting peer.
// If the block doesn't exist a bcNoBlockResponseMessage is sent.
// If all nodes are honest, no node should request a block that doesn't exist.
func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcBlockRequestMessage,
	src p2p.Peer) (queued bool) {

	block := bcR.store.LoadBlock(msg.Height)
	if block != nil {
		msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block})
		return src.TrySend(BlockchainChannel, msgBytes)
	}

	bcR.Logger.Info("peer asking for a block we don't have", "src", src, "height", msg.Height)

	msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height})
	return src.TrySend(BlockchainChannel, msgBytes)
}

func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcStatusRequestMessage, src p2p.Peer) (queued bool) {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()})
	return src.TrySend(BlockchainChannel, msgBytes)
}

// RemovePeer implements Reactor by removing the peer from the pool.
func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
	msgData := bcReactorMessage{
		event: peerRemoveEv,
		data: bReactorEventData{
			peerID: peer.ID(),
			err:    errSwitchRemovesPeer,
		},
	}
	bcR.errorsForFSMCh <- msgData
}

// Receive implements Reactor by handling the four message types in the switch below.
func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	msg, err := decodeMsg(msgBytes)
	if err != nil {
		bcR.Logger.Error("error decoding message",
			"src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	if err = msg.ValidateBasic(); err != nil {
		bcR.Logger.Error("peer sent us invalid msg", "peer", src, "msg", msg, "err", err)
		_ = bcR.swReporter.Report(behaviour.BadMessage(src.ID(), err.Error()))
		return
	}

	bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg)

	switch msg := msg.(type) {
	case *bcBlockRequestMessage:
		if queued := bcR.sendBlockToPeer(msg, src); !queued {
			// Unfortunately not queued since the queue is full.
			bcR.Logger.Error("Could not send block message to peer", "src", src, "height", msg.Height)
		}

	case *bcStatusRequestMessage:
		// Send peer our state.
		if queued := bcR.sendStatusResponseToPeer(msg, src); !queued {
			// Unfortunately not queued since the queue is full.
			bcR.Logger.Error("Could not send status message to peer", "src", src)
		}

	case *bcBlockResponseMessage:
		msgForFSM := bcReactorMessage{
			event: blockResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Block.Height,
				block:  msg.Block,
				length: len(msgBytes),
			},
		}
		bcR.Logger.Info("Received", "src", src, "height", msg.Block.Height)
		bcR.messagesForFSMCh <- msgForFSM

	case *bcStatusResponseMessage:
		// Got a peer status. Unverified.
		msgForFSM := bcReactorMessage{
			event: statusResponseEv,
			data: bReactorEventData{
				peerID: src.ID(),
				height: msg.Height,
				length: len(msgBytes),
			},
		}
		bcR.messagesForFSMCh <- msgForFSM

	default:
		bcR.Logger.Error(fmt.Sprintf("unknown message type %v", reflect.TypeOf(msg)))
	}
}

// processBlocksRoutine processes blocks until signaled to stop over the stopProcessing channel.
func (bcR *BlockchainReactor) processBlocksRoutine(stopProcessing chan struct{}) {

	processReceivedBlockTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond)
	doProcessBlockCh := make(chan struct{}, 1)

	lastHundred := time.Now()
	lastRate := 0.0

ForLoop:
	for {
		select {
		case <-stopProcessing:
			bcR.Logger.Info("finishing block execution")
			break ForLoop
		case <-processReceivedBlockTicker.C: // try to execute blocks
			select {
			case doProcessBlockCh <- struct{}{}:
			default:
			}
		case <-doProcessBlockCh:
			for {
				err := bcR.processBlock()
				if err == errMissingBlock {
					break
				}
				// Notify FSM of block processing result.
				msgForFSM := bcReactorMessage{
					event: processedBlockEv,
					data: bReactorEventData{
						err: err,
					},
				}
				_ = bcR.fsm.Handle(&msgForFSM)

				if err != nil {
					break
				}

				bcR.blocksSynced++
				if bcR.blocksSynced%100 == 0 {
					// Exponentially weighted moving average of the sync rate,
					// updated once per batch of 100 blocks.
					lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
					height, maxPeerHeight := bcR.fsm.Status()
					bcR.Logger.Info("Fast Sync Rate", "height", height,
						"max_peer_height", maxPeerHeight, "blocks/s", lastRate)
					lastHundred = time.Now()
				}
			}
		}
	}
}

// poolRoutine receives and handles messages from the Receive() routine and from the FSM.
func (bcR *BlockchainReactor) poolRoutine() {

	bcR.fsm.Start()

	sendBlockRequestTicker := time.NewTicker(trySendIntervalMS * time.Millisecond)
	statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second)

	stopProcessing := make(chan struct{}, 1)
	go bcR.processBlocksRoutine(stopProcessing)

ForLoop:
	for {
		select {

		case <-sendBlockRequestTicker.C:
			if !bcR.fsm.NeedsBlocks() {
				continue
			}
			_ = bcR.fsm.Handle(&bcReactorMessage{
				event: makeRequestsEv,
				data: bReactorEventData{
					maxNumRequests: maxNumRequests}})

		case <-statusUpdateTicker.C:
			// Ask for status updates.
			go bcR.sendStatusRequest()

		case msg := <-bcR.messagesForFSMCh:
			// Sent from the Receive() routine when status (statusResponseEv) and
			// block (blockResponseEv) response events are received.
			_ = bcR.fsm.Handle(&msg)

		case msg := <-bcR.errorsForFSMCh:
			// Sent from the switch.RemovePeer() routine (peerRemoveEv) and
			// the FSM state timer expiry routine (stateTimeoutEv).
			_ = bcR.fsm.Handle(&msg)

		case msg := <-bcR.eventsFromFSMCh:
			switch msg.event {
			case syncFinishedEv:
				stopProcessing <- struct{}{}
				// Sent from the FSM when it enters the finished state.
				break ForLoop
			case peerErrorEv:
				// Sent from the FSM when it detects a peer error.
				bcR.reportPeerErrorToSwitch(msg.data.err, msg.data.peerID)
				if msg.data.err == errNoPeerResponse {
					// Sent from the peer timeout handler routine.
					_ = bcR.fsm.Handle(&bcReactorMessage{
						event: peerRemoveEv,
						data: bReactorEventData{
							peerID: msg.data.peerID,
							err:    msg.data.err,
						},
					})
				}
				// For slow peers, or for errors due to blocks received from the
				// wrong peer, the FSM has already removed the peers.
			default:
				bcR.Logger.Error("Event from FSM not supported", "type", msg.event)
			}

		case <-bcR.Quit():
			break ForLoop
		}
	}
}

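// Message-flow sketch for the routines above (an informal summary of this
// file, not an authoritative diagram):
//
//	p2p Receive()                -> messagesForFSMCh -> poolRoutine -> fsm.Handle()
//	switch RemovePeer() / timers -> errorsForFSMCh   -> poolRoutine -> fsm.Handle()
//	FSM (peer error, sync done)  -> eventsFromFSMCh  -> poolRoutine -> switch / stop
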
func (bcR *BlockchainReactor) reportPeerErrorToSwitch(err error, peerID p2p.ID) {
	peer := bcR.Switch.Peers().Get(peerID)
	if peer != nil {
		_ = bcR.swReporter.Report(behaviour.BadMessage(peerID, err.Error()))
	}
}

func (bcR *BlockchainReactor) processBlock() error {

	first, second, err := bcR.fsm.FirstTwoBlocks()
	if err != nil {
		// We need both blocks to sync the first one.
		return err
	}

	chainID := bcR.initialState.ChainID

	firstParts := first.MakePartSet(types.BlockPartSizeBytes)
	firstPartsHeader := firstParts.Header()
	firstID := types.BlockID{Hash: first.Hash(), PartsHeader: firstPartsHeader}
	// Finally, verify the first block using the second's commit.
	// NOTE: we can probably make this more efficient, but note that calling
	// first.Hash() doesn't verify the tx contents, so MakePartSet() is
	// currently necessary.
	err = bcR.state.Validators.VerifyCommit(chainID, firstID, first.Height, second.LastCommit)
	if err != nil {
		bcR.Logger.Error("error during commit verification", "err", err,
			"first", first.Height, "second", second.Height)
		return errBlockVerificationFailure
	}

	bcR.store.SaveBlock(first, firstParts, second.LastCommit)

	bcR.state, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first)
	if err != nil {
		panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
	}

	return nil
}

// sendStatusRequest broadcasts a status request carrying the `BlockStore` height.
// Implements bcRNotifier.
func (bcR *BlockchainReactor) sendStatusRequest() {
	msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()})
	bcR.Switch.Broadcast(BlockchainChannel, msgBytes)
}

// sendBlockRequest sends a `BlockRequest` message for the given height to the peer.
// Implements bcRNotifier.
func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
	peer := bcR.Switch.Peers().Get(peerID)
	if peer == nil {
		return errNilPeerForBlockRequest
	}

	msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{height})
	queued := peer.TrySend(BlockchainChannel, msgBytes)
	if !queued {
		return errSendQueueFull
	}
	return nil
}

// switchToConsensus hands control over to the consensus reactor once fast sync is done.
// Implements bcRNotifier.
func (bcR *BlockchainReactor) switchToConsensus() {
	conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor)
	if ok {
		conR.SwitchToConsensus(bcR.state, bcR.blocksSynced)
		bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv}
	}
	// The type assertion should only fail during testing.
}

// sendPeerError reports a peer error to the reactor. Implements bcRNotifier.
// Called by the FSM and the pool:
//   - the pool calls it when it detects a slow peer or when a peer times out
//   - the FSM calls it when:
//     - adding a block (addBlock) fails
//     - reactor processing of a block reports failure; the FSM then sends back
//       the peers of the first and second blocks
func (bcR *BlockchainReactor) sendPeerError(err error, peerID p2p.ID) {
	bcR.Logger.Info("sendPeerError:", "peer", peerID, "error", err)
	msgData := bcFsmMessage{
		event: peerErrorEv,
		data: bFsmEventData{
			peerID: peerID,
			err:    err,
		},
	}
	bcR.eventsFromFSMCh <- msgData
}

// resetStateTimer starts (or resets) the timer guarding an FSM state.
// Implements bcRNotifier.
func (bcR *BlockchainReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
	if timer == nil {
		panic("nil timer pointer parameter")
	}
	if *timer == nil {
		*timer = time.AfterFunc(timeout, func() {
			msg := bcReactorMessage{
				event: stateTimeoutEv,
				data: bReactorEventData{
					stateName: name,
				},
			}
			bcR.errorsForFSMCh <- msg
		})
	} else {
		(*timer).Reset(timeout)
	}
}

//-----------------------------------------------------------------------------
// Messages

// BlockchainMessage is a generic message for this reactor.
type BlockchainMessage interface {
	ValidateBasic() error
}

// RegisterBlockchainMessages registers the fast sync messages for amino encoding.
func RegisterBlockchainMessages(cdc *amino.Codec) {
	cdc.RegisterInterface((*BlockchainMessage)(nil), nil)
	cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/blockchain/BlockRequest", nil)
	cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/blockchain/BlockResponse", nil)
	cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/blockchain/NoBlockResponse", nil)
	cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/blockchain/StatusResponse", nil)
	cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/blockchain/StatusRequest", nil)
}

func decodeMsg(bz []byte) (msg BlockchainMessage, err error) {
	if len(bz) > maxMsgSize {
		return msg, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
	}
	err = cdc.UnmarshalBinaryBare(bz, &msg)
	return
}

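// Round-trip sketch: this mirrors what AddPeer and Receive do with the
// package-level `cdc` codec (assumed to be populated via
// RegisterBlockchainMessages in this package's codec setup):
//
//	bz := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{Height: 10})
//	msg, err := decodeMsg(bz) // msg holds a *bcStatusRequestMessage, err == nil
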
//-------------------------------------

type bcBlockRequestMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcBlockRequestMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcBlockRequestMessage) String() string {
	return fmt.Sprintf("[bcBlockRequestMessage %v]", m.Height)
}

type bcNoBlockResponseMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcNoBlockResponseMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcNoBlockResponseMessage) String() string {
	return fmt.Sprintf("[bcNoBlockResponseMessage %d]", m.Height)
}

//-------------------------------------

type bcBlockResponseMessage struct {
	Block *types.Block
}

// ValidateBasic performs basic validation.
func (m *bcBlockResponseMessage) ValidateBasic() error {
	return m.Block.ValidateBasic()
}

func (m *bcBlockResponseMessage) String() string {
	return fmt.Sprintf("[bcBlockResponseMessage %v]", m.Block.Height)
}

//-------------------------------------

type bcStatusRequestMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcStatusRequestMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcStatusRequestMessage) String() string {
	return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height)
}

//-------------------------------------

type bcStatusResponseMessage struct {
	Height int64
}

// ValidateBasic performs basic validation.
func (m *bcStatusResponseMessage) ValidateBasic() error {
	if m.Height < 0 {
		return errors.New("negative Height")
	}
	return nil
}

func (m *bcStatusResponseMessage) String() string {
	return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height)
}

@@ -1,450 +0,0 @@
package v1

import (
	"errors"
	"fmt"
	"sync"
	"time"

	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

// Blockchain Reactor State
type bcReactorFSMState struct {
	name string

	// called when transitioning out of the current state
	handle func(*BcReactorFSM, bReactorEvent, bReactorEventData) (next *bcReactorFSMState, err error)
	// called when entering the state
	enter func(fsm *BcReactorFSM)

	// timeout to ensure the FSM is not stuck in a state forever;
	// the timer is owned and run by the FSM instance
	timeout time.Duration
}

func (s *bcReactorFSMState) String() string {
	return s.name
}

// BcReactorFSM is the data structure for the Blockchain Reactor State Machine.
type BcReactorFSM struct {
	logger log.Logger
	mtx    sync.Mutex

	startTime time.Time

	state      *bcReactorFSMState
	stateTimer *time.Timer
	pool       *BlockPool

	// interface used to call the Blockchain reactor to send StatusRequest, BlockRequest, report errors, etc.
	toBcR bcReactor
}

// NewFSM creates a new reactor FSM.
func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM {
	return &BcReactorFSM{
		state:     unknown,
		startTime: time.Now(),
		pool:      NewBlockPool(height, toBcR),
		toBcR:     toBcR,
	}
}

// bReactorEventData is part of the message sent by the reactor to the FSM and used by the state handlers.
type bReactorEventData struct {
	peerID         p2p.ID
	err            error        // for peer error: timeout, slow; for processed block event if an error occurred
	height         int64        // for status response; for processed block event
	block          *types.Block // for block response
	stateName      string       // for state timeout events
	length         int          // for block response event, length of the received block, used to detect slow peers
	maxNumRequests int          // for request needed event, maximum number of pending requests
}

// Blockchain Reactor Events (the input to the state machine)
type bReactorEvent uint

const (
	// message type events
	startFSMEv = iota + 1
	statusResponseEv
	blockResponseEv
	processedBlockEv
	makeRequestsEv
	stopFSMEv

	// other events
	peerRemoveEv = iota + 256
	stateTimeoutEv
)

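// State-transition sketch, as implemented by the handlers registered in init()
// below (informal; most error transitions omitted):
//
//	unknown --startFSMEv--> waitForPeer --statusResponseEv--> waitForBlock
//	waitForBlock --processedBlockEv, max height reached--> finished
//	waitForBlock --peer loss / timeouts--> waitForPeer or finished
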
func (msg *bcReactorMessage) String() string {
	var dataStr string

	switch msg.event {
	case startFSMEv:
		dataStr = ""
	case statusResponseEv:
		dataStr = fmt.Sprintf("peer=%v height=%v", msg.data.peerID, msg.data.height)
	case blockResponseEv:
		dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v",
			msg.data.peerID, msg.data.block.Height, msg.data.length)
	case processedBlockEv:
		dataStr = fmt.Sprintf("error=%v", msg.data.err)
	case makeRequestsEv:
		dataStr = ""
	case stopFSMEv:
		dataStr = ""
	case peerRemoveEv:
		dataStr = fmt.Sprintf("peer: %v is being removed by the switch", msg.data.peerID)
	case stateTimeoutEv:
		dataStr = fmt.Sprintf("state=%v", msg.data.stateName)
	default:
		dataStr = "cannot interpret message data"
	}

	return fmt.Sprintf("%v: %v", msg.event, dataStr)
}

func (ev bReactorEvent) String() string {
	switch ev {
	case startFSMEv:
		return "startFSMEv"
	case statusResponseEv:
		return "statusResponseEv"
	case blockResponseEv:
		return "blockResponseEv"
	case processedBlockEv:
		return "processedBlockEv"
	case makeRequestsEv:
		return "makeRequestsEv"
	case stopFSMEv:
		return "stopFSMEv"
	case peerRemoveEv:
		return "peerRemoveEv"
	case stateTimeoutEv:
		return "stateTimeoutEv"
	default:
		return "event unknown"
	}
}

// states
var (
	unknown      *bcReactorFSMState
	waitForPeer  *bcReactorFSMState
	waitForBlock *bcReactorFSMState
	finished     *bcReactorFSMState
)

// timeouts for state timers
const (
	waitForPeerTimeout                 = 3 * time.Second
	waitForBlockAtCurrentHeightTimeout = 10 * time.Second
)

// errors
var (
	// internal to the package
	errNoErrorFinished        = errors.New("fast sync is finished")
	errInvalidEvent           = errors.New("invalid event in current state")
	errMissingBlock           = errors.New("missing blocks")
	errNilPeerForBlockRequest = errors.New("peer for block request does not exist in the switch")
	errSendQueueFull          = errors.New("block request not made, send-queue is full")
	errPeerTooShort           = errors.New("peer height too low, old peer removed / new peer not added")
	errSwitchRemovesPeer      = errors.New("switch is removing peer")
	errTimeoutEventWrongState = errors.New("timeout event for a state different than the current one")
	errNoTallerPeer           = errors.New("fast sync timed out on waiting for a peer taller than this node")

	// reported eventually to the switch
	errPeerLowersItsHeight             = errors.New("fast sync peer reports a height lower than previous")
	errNoPeerResponseForCurrentHeights = errors.New("fast sync timed out on peer block response for current heights")
	errNoPeerResponse                  = errors.New("fast sync timed out on peer block response")
	errBadDataFromPeer                 = errors.New("fast sync received block from wrong peer or block is bad")
	errDuplicateBlock                  = errors.New("fast sync received duplicate block from peer")
	errBlockVerificationFailure        = errors.New("fast sync block verification failure")
	errSlowPeer                        = errors.New("fast sync peer is not sending us data fast enough")
)

func init() {
	unknown = &bcReactorFSMState{
		name: "unknown",
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {
			case startFSMEv:
				// Broadcast Status message. Currently doesn't return a non-nil error.
				fsm.toBcR.sendStatusRequest()
				return waitForPeer, nil

			case stopFSMEv:
				return finished, errNoErrorFinished

			default:
				return unknown, errInvalidEvent
			}
		},
	}

	waitForPeer = &bcReactorFSMState{
		name:    "waitForPeer",
		timeout: waitForPeerTimeout,
		enter: func(fsm *BcReactorFSM) {
			// Start the state timer; it is stopped when leaving the state.
			fsm.resetStateTimer()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {
			case stateTimeoutEv:
				if data.stateName != "waitForPeer" {
					fsm.logger.Error("received a state timeout event for different state",
						"state", data.stateName)
					return waitForPeer, errTimeoutEventWrongState
				}
				// There was no statusResponse received from any peer.
				// Should we send the status request again?
				return finished, errNoTallerPeer

			case statusResponseEv:
				if err := fsm.pool.UpdatePeer(data.peerID, data.height); err != nil {
					if fsm.pool.NumPeers() == 0 {
						return waitForPeer, err
					}
				}
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return waitForBlock, nil

			case stopFSMEv:
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return finished, errNoErrorFinished

			default:
				return waitForPeer, errInvalidEvent
			}
		},
	}

	waitForBlock = &bcReactorFSMState{
		name:    "waitForBlock",
		timeout: waitForBlockAtCurrentHeightTimeout,
		enter: func(fsm *BcReactorFSM) {
			// Start the state timer; it is stopped when leaving the state.
			fsm.resetStateTimer()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			switch ev {

			case statusResponseEv:
				err := fsm.pool.UpdatePeer(data.peerID, data.height)
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, err
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, err
				}
				return waitForBlock, err

			case blockResponseEv:
				fsm.logger.Debug("blockResponseEv", "H", data.block.Height)
				err := fsm.pool.AddBlock(data.peerID, data.block, data.length)
				if err != nil {
					// The block was unsolicited, came from an unexpected peer,
					// or is a duplicate of one we already have.
					// Ignore the block, remove the peer and send an error to the switch.
					fsm.pool.RemovePeer(data.peerID, err)
					fsm.toBcR.sendPeerError(err, data.peerID)
				}
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, err
				}
				return waitForBlock, err

			case processedBlockEv:
				if data.err != nil {
					first, second, _ := fsm.pool.FirstTwoBlocksAndPeers()
					fsm.logger.Error("error processing block", "err", data.err,
						"first", first.block.Height, "second", second.block.Height)
					fsm.logger.Error("send peer error for", "peer", first.peer.ID)
					fsm.toBcR.sendPeerError(data.err, first.peer.ID)
					fsm.logger.Error("send peer error for", "peer", second.peer.ID)
					fsm.toBcR.sendPeerError(data.err, second.peer.ID)
					// Remove the first two blocks. This will also remove the peers.
					fsm.pool.InvalidateFirstTwoBlocks(data.err)
				} else {
					fsm.pool.ProcessedCurrentHeightBlock()
					// Since we advanced one block, reset the state timer.
					fsm.resetStateTimer()
				}

				// Both cases above may result in achieving the maximum height.
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}

				return waitForBlock, data.err

			case peerRemoveEv:
				// This event is sent by the switch to remove disconnected and errored peers.
				fsm.pool.RemovePeer(data.peerID, data.err)
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, nil
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}
				return waitForBlock, nil

			case makeRequestsEv:
				fsm.makeNextRequests(data.maxNumRequests)
				return waitForBlock, nil

			case stateTimeoutEv:
				if data.stateName != "waitForBlock" {
					fsm.logger.Error("received a state timeout event for different state",
						"state", data.stateName)
					return waitForBlock, errTimeoutEventWrongState
				}
				// We haven't received the block at the current height or height+1. Remove the peer.
				fsm.pool.RemovePeerAtCurrentHeights(errNoPeerResponseForCurrentHeights)
				fsm.resetStateTimer()
				if fsm.pool.NumPeers() == 0 {
					return waitForPeer, errNoPeerResponseForCurrentHeights
				}
				if fsm.pool.ReachedMaxHeight() {
					return finished, nil
				}
				return waitForBlock, errNoPeerResponseForCurrentHeights

			case stopFSMEv:
				if fsm.stateTimer != nil {
					fsm.stateTimer.Stop()
				}
				return finished, errNoErrorFinished

			default:
				return waitForBlock, errInvalidEvent
			}
		},
	}

	finished = &bcReactorFSMState{
		name: "finished",
		enter: func(fsm *BcReactorFSM) {
			fsm.logger.Info("Time to switch to consensus reactor!", "height", fsm.pool.Height)
			fsm.toBcR.switchToConsensus()
			fsm.cleanup()
		},
		handle: func(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) (*bcReactorFSMState, error) {
			return finished, nil
		},
	}
}

// bcReactor is the interface used by the FSM for sending Block and Status requests,
// reporting peer errors and resetting state timers.
// Implemented by the BlockchainReactor and by the tests.
type bcReactor interface {
	sendStatusRequest()
	sendBlockRequest(peerID p2p.ID, height int64) error
	sendPeerError(err error, peerID p2p.ID)
	resetStateTimer(name string, timer **time.Timer, timeout time.Duration)
	switchToConsensus()
}

// SetLogger sets the FSM logger.
func (fsm *BcReactorFSM) SetLogger(l log.Logger) {
	fsm.logger = l
	fsm.pool.SetLogger(l)
}

// Start starts the FSM.
func (fsm *BcReactorFSM) Start() {
	_ = fsm.Handle(&bcReactorMessage{event: startFSMEv})
}

// Handle processes messages and events sent to the FSM.
func (fsm *BcReactorFSM) Handle(msg *bcReactorMessage) error {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	fsm.logger.Debug("FSM received", "event", msg, "state", fsm.state)

	if fsm.state == nil {
		fsm.state = unknown
	}
	next, err := fsm.state.handle(fsm, msg.event, msg.data)
	if err != nil {
		fsm.logger.Error("FSM event handler returned", "err", err,
			"state", fsm.state, "event", msg.event)
	}

	oldState := fsm.state.name
	fsm.transition(next)
	if oldState != fsm.state.name {
		fsm.logger.Info("FSM changed state", "new_state", fsm.state)
	}
	return err
}

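// Usage sketch: this mirrors how the reactor's poolRoutine and the tests drive
// the FSM (the event values here are illustrative):
//
//	err := fsm.Handle(&bcReactorMessage{
//		event: statusResponseEv,
//		data:  bReactorEventData{peerID: "P1", height: 10},
//	})
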
func (fsm *BcReactorFSM) transition(next *bcReactorFSMState) {
	if next == nil {
		return
	}
	if fsm.state != next {
		fsm.state = next
		if next.enter != nil {
			next.enter(fsm)
		}
	}
}

// resetStateTimer is called when entering an FSM state in order to detect lack of
// progress in the state machine. Note the use of the 'bcr' interface to facilitate
// testing without the timer expiring.
func (fsm *BcReactorFSM) resetStateTimer() {
	fsm.toBcR.resetStateTimer(fsm.state.name, &fsm.stateTimer, fsm.state.timeout)
}

func (fsm *BcReactorFSM) isCaughtUp() bool {
	return fsm.state == finished
}

func (fsm *BcReactorFSM) makeNextRequests(maxNumRequests int) {
	fsm.pool.MakeNextRequests(maxNumRequests)
}

func (fsm *BcReactorFSM) cleanup() {
	fsm.pool.Cleanup()
}

// NeedsBlocks checks if more block requests are required.
func (fsm *BcReactorFSM) NeedsBlocks() bool {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	return fsm.state.name == "waitForBlock" && fsm.pool.NeedsBlocks()
}

// FirstTwoBlocks returns the two blocks at pool height and height+1.
func (fsm *BcReactorFSM) FirstTwoBlocks() (first, second *types.Block, err error) {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	firstBP, secondBP, err := fsm.pool.FirstTwoBlocksAndPeers()
	if err == nil {
		first = firstBP.block
		second = secondBP.block
	}
	return
}

// Status returns the pool's height and the maximum peer height.
func (fsm *BcReactorFSM) Status() (height, maxPeerHeight int64) {
	fsm.mtx.Lock()
	defer fsm.mtx.Unlock()
	return fsm.pool.Height, fsm.pool.MaxPeerHeight
}

@@ -1,942 +0,0 @@
package v1

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/types"
)

type lastBlockRequestT struct {
	peerID p2p.ID
	height int64
}

type lastPeerErrorT struct {
	peerID p2p.ID
	err    error
}

// reactor for FSM testing
type testReactor struct {
	logger            log.Logger
	fsm               *BcReactorFSM
	numStatusRequests int
	numBlockRequests  int
	lastBlockRequest  lastBlockRequestT
	lastPeerError     lastPeerErrorT
	stateTimerStarts  map[string]int
}

func sendEventToFSM(fsm *BcReactorFSM, ev bReactorEvent, data bReactorEventData) error {
	return fsm.Handle(&bcReactorMessage{event: ev, data: data})
}

type fsmStepTestValues struct {
	currentState string
	event        bReactorEvent
	data         bReactorEventData

	wantErr           error
	wantState         string
	wantStatusReqSent bool
	wantReqIncreased  bool
	wantNewBlocks     []int64
	wantRemovedPeers  []p2p.ID
}

// ---------------------------------------------------------------------------
// helper test functions for different FSM events, states and expected behavior
func sStopFSMEv(current, expected string) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        stopFSMEv,
		wantState:    expected,
		wantErr:      errNoErrorFinished}
}

func sUnknownFSMEv(current string) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        1234,
		wantState:    current,
		wantErr:      errInvalidEvent}
}

func sStartFSMEv() fsmStepTestValues {
	return fsmStepTestValues{
		currentState:      "unknown",
		event:             startFSMEv,
		wantState:         "waitForPeer",
		wantStatusReqSent: true}
}

func sStateTimeoutEv(current, expected string, timedoutState string, wantErr error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        stateTimeoutEv,
		data: bReactorEventData{
			stateName: timedoutState,
		},
		wantState: expected,
		wantErr:   wantErr,
	}
}

func sProcessedBlockEv(current, expected string, reactorError error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        processedBlockEv,
		data: bReactorEventData{
			err: reactorError,
		},
		wantState: expected,
		wantErr:   reactorError,
	}
}

func sStatusEv(current, expected string, peerID p2p.ID, height int64, err error) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        statusResponseEv,
		data:         bReactorEventData{peerID: peerID, height: height},
		wantState:    expected,
		wantErr:      err}
}

func sMakeRequestsEv(current, expected string, maxPendingRequests int) fsmStepTestValues {
	return fsmStepTestValues{
		currentState:     current,
		event:            makeRequestsEv,
		data:             bReactorEventData{maxNumRequests: maxPendingRequests},
		wantState:        expected,
		wantReqIncreased: true,
	}
}

func sMakeRequestsEvErrored(current, expected string,
	maxPendingRequests int, err error, peersRemoved []p2p.ID) fsmStepTestValues {
	return fsmStepTestValues{
		currentState:     current,
		event:            makeRequestsEv,
		data:             bReactorEventData{maxNumRequests: maxPendingRequests},
		wantState:        expected,
		wantErr:          err,
		wantRemovedPeers: peersRemoved,
		wantReqIncreased: true,
	}
}

func sBlockRespEv(current, expected string, peerID p2p.ID, height int64, prevBlocks []int64) fsmStepTestValues {
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}
	return fsmStepTestValues{
		currentState: current,
		event:        blockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
			block:  types.MakeBlock(height, txs, nil, nil),
			length: 100},
		wantState:     expected,
		wantNewBlocks: append(prevBlocks, height),
	}
}

func sBlockRespEvErrored(current, expected string,
	peerID p2p.ID, height int64, prevBlocks []int64, wantErr error, peersRemoved []p2p.ID) fsmStepTestValues {
	txs := []types.Tx{types.Tx("foo"), types.Tx("bar")}

	return fsmStepTestValues{
		currentState: current,
		event:        blockResponseEv,
		data: bReactorEventData{
			peerID: peerID,
			height: height,
			block:  types.MakeBlock(height, txs, nil, nil),
			length: 100},
		wantState:        expected,
		wantErr:          wantErr,
		wantRemovedPeers: peersRemoved,
		wantNewBlocks:    prevBlocks,
	}
}

func sPeerRemoveEv(current, expected string, peerID p2p.ID, err error, peersRemoved []p2p.ID) fsmStepTestValues {
	return fsmStepTestValues{
		currentState: current,
		event:        peerRemoveEv,
		data: bReactorEventData{
			peerID: peerID,
			err:    err,
		},
		wantState:        expected,
		wantRemovedPeers: peersRemoved,
	}
}

// --------------------------------------------

func newTestReactor(height int64) *testReactor {
	testBcR := &testReactor{logger: log.TestingLogger(), stateTimerStarts: make(map[string]int)}
	testBcR.fsm = NewFSM(height, testBcR)
	testBcR.fsm.SetLogger(testBcR.logger)
	return testBcR
}

func fixBlockResponseEvStep(step *fsmStepTestValues, testBcR *testReactor) {
	// There is currently no good way to know to which peer a block request was sent.
	// So in some cases where it does not matter, before we simulate a block response
	// we cheat and look up which peer the block is expected from.
	if step.event == blockResponseEv {
		height := step.data.height
		peerID, ok := testBcR.fsm.pool.blocks[height]
		if ok {
			step.data.peerID = peerID
		}
	}
}

type testFields struct {
	name               string
	startingHeight     int64
	maxRequestsPerPeer int
	maxPendingRequests int
	steps              []fsmStepTestValues
}

func executeFSMTests(t *testing.T, tests []testFields, matchRespToReq bool) {
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			// Create test reactor
			testBcR := newTestReactor(tt.startingHeight)

			if tt.maxRequestsPerPeer != 0 {
				maxRequestsPerPeer = tt.maxRequestsPerPeer
			}

			for _, step := range tt.steps {
				step := step
				assert.Equal(t, step.currentState, testBcR.fsm.state.name)

				var heightBefore int64
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightBefore = testBcR.fsm.pool.Height
				}
				oldNumStatusRequests := testBcR.numStatusRequests
				oldNumBlockRequests := testBcR.numBlockRequests
				if matchRespToReq {
					fixBlockResponseEvStep(&step, testBcR)
				}

				fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
				assert.Equal(t, step.wantErr, fsmErr)

				if step.wantStatusReqSent {
					assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
				} else {
					assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
				}

				if step.wantReqIncreased {
					assert.True(t, oldNumBlockRequests < testBcR.numBlockRequests)
				} else {
					assert.Equal(t, oldNumBlockRequests, testBcR.numBlockRequests)
				}

				for _, height := range step.wantNewBlocks {
					_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(height)
					assert.Nil(t, err)
				}
				if step.event == processedBlockEv && step.data.err == errBlockVerificationFailure {
					heightAfter := testBcR.fsm.pool.Height
					assert.Equal(t, heightBefore, heightAfter)
					firstAfter, err1 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
					secondAfter, err2 := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
					assert.NotNil(t, err1)
					assert.NotNil(t, err2)
					assert.Nil(t, firstAfter)
					assert.Nil(t, secondAfter)
				}

				assert.Equal(t, step.wantState, testBcR.fsm.state.name)

				if step.wantState == "finished" {
					assert.True(t, testBcR.fsm.isCaughtUp())
				}
			}
		})
	}
}

func TestFSMBasic(t *testing.T) {
	tests := []testFields{
		{
			name:               "one block, one peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 2, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
		{
			name:               "multi block, multi peer - TS2",
			startingHeight:     1,
			maxRequestsPerPeer: 2,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				sStatusEv("waitForPeer", "waitForBlock", "P1", 4, nil),
				sStatusEv("waitForBlock", "waitForBlock", "P2", 4, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 4, []int64{1, 2, 3}),

				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, true)
}

func TestFSMBlockVerificationFailure(t *testing.T) {
	tests := []testFields{
		{
			name:               "block verification failure - TS2 variant",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),

				// add P1 and get blocks 1-3 from it
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// process block failure, should remove P1 and all blocks
				sProcessedBlockEv("waitForBlock", "waitForBlock", errBlockVerificationFailure),

				// get blocks 1-3 from P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 1, []int64{}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{1}),
				sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{1, 2}),

				// finish after processing blocks 1 and 2
				sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
				sProcessedBlockEv("waitForBlock", "finished", nil),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBadBlockFromPeer(t *testing.T) {
	tests := []testFields{
		{
			name:               "block we haven't asked for",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and ask for blocks 1-3
				sStatusEv("waitForPeer", "waitForBlock", "P1", 300, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// blockResponseEv for height 100 should cause an error
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 100, []int64{}, errMissingBlock, []p2p.ID{}),
			},
		},
		{
			name:               "block we already have",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and get block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 100, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEv("waitForBlock", "waitForBlock",
					"P1", 1, []int64{}),

				// Get block 1 again. Since the peer is removed together with block 1,
				// the blocks present in the pool should be {}.
				sBlockRespEvErrored("waitForBlock", "waitForPeer",
					"P1", 1, []int64{}, errDuplicateBlock, []p2p.ID{"P1"}),
			},
		},
		{
			name:               "block from unknown peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1 and get block 1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),

				// get block 1 from unknown peer P2
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
		{
			name:               "block from wrong peer",
			startingHeight:     1,
			maxRequestsPerPeer: 3,
			steps: []fsmStepTestValues{
				sStartFSMEv(),
				// add P1, make requests for blocks 1-3 to P1
				sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
				sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),

				// add P2
				sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),

				// receive block 1 from P2
				sBlockRespEvErrored("waitForBlock", "waitForBlock",
					"P2", 1, []int64{}, errBadDataFromPeer, []p2p.ID{"P2"}),
			},
		},
	}

	executeFSMTests(t, tests, false)
}

func TestFSMBlockAtCurrentHeightDoesNotArriveInTime(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "block at current height undelivered - TS5",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1, get blocks 1 and 2, process block 1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock",
|
||||
"P1", 1, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock",
|
||||
"P1", 2, []int64{1}),
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
|
||||
// add P2
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
|
||||
|
||||
// timeout on block 3, P1 should be removed
|
||||
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
|
||||
|
||||
// make requests and finish by receiving blocks 2 and 3 from P2
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 2, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P2", 3, []int64{2}),
|
||||
sProcessedBlockEv("waitForBlock", "finished", nil),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "block at current height undelivered, at maxPeerHeight after peer removal - TS3",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1, request blocks 1-3 from P1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
// add P2 (tallest)
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 30, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
// receive blocks 1-3 from P1
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 1, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 2, []int64{1}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 3, []int64{1, 2}),
|
||||
|
||||
// process blocks at heights 1 and 2
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
|
||||
// timeout on block at height 4
|
||||
sStateTimeoutEv("waitForBlock", "finished", "waitForBlock", nil),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, true)
|
||||
}
|
||||
|
||||
func TestFSMPeerRelatedEvents(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "peer remove event with no blocks",
|
||||
startingHeight: 1,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1, P2, P3
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P3", 3, nil),
|
||||
|
||||
// switch removes P2
|
||||
sPeerRemoveEv("waitForBlock", "waitForBlock", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only peer removed while in waitForBlock state",
|
||||
startingHeight: 100,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
|
||||
|
||||
// switch removes P1
|
||||
sPeerRemoveEv("waitForBlock", "waitForPeer", "P1", errSwitchRemovesPeer, []p2p.ID{"P1"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "highest peer removed while in waitForBlock state, node reaches maxPeerHeight - TS4 ",
|
||||
startingHeight: 100,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1 and make requests
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
// add P2
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),
|
||||
|
||||
// get blocks 100 and 101 from P1 and process block at height 100
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
|
||||
// switch removes peer P1, should be finished
|
||||
sPeerRemoveEv("waitForBlock", "finished", "P2", errSwitchRemovesPeer, []p2p.ID{"P2"}),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "highest peer lowers its height in waitForBlock state, node reaches maxPeerHeight - TS4",
|
||||
startingHeight: 100,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1 and make requests
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 101, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
|
||||
// add P2
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 200, nil),
|
||||
|
||||
// get blocks 100 and 101 from P1
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 100, []int64{}),
|
||||
sBlockRespEv("waitForBlock", "waitForBlock", "P1", 101, []int64{100}),
|
||||
|
||||
// processed block at heights 100
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil),
|
||||
|
||||
// P2 becomes short
|
||||
sStatusEv("waitForBlock", "finished", "P2", 100, errPeerLowersItsHeight),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new short peer while in waitForPeer state",
|
||||
startingHeight: 100,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForPeer", "P1", 3, errPeerTooShort),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "new short peer while in waitForBlock state",
|
||||
startingHeight: 100,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, errPeerTooShort),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "only peer updated with low height while in waitForBlock state",
|
||||
startingHeight: 100,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 200, nil),
|
||||
sStatusEv("waitForBlock", "waitForPeer", "P1", 3, errPeerLowersItsHeight),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "peer does not exist in the switch",
|
||||
startingHeight: 9999999,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
// add P1
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 20000000, nil),
|
||||
// send request for block 9999999
|
||||
// Note: For this block request the "switch missing the peer" error is simulated,
|
||||
// see implementation of bcReactor interface, sendBlockRequest(), in this file.
|
||||
sMakeRequestsEvErrored("waitForBlock", "waitForBlock",
|
||||
maxNumRequests, nil, []p2p.ID{"P1"}),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, true)
|
||||
}
|
||||
|
||||
func TestFSMStopFSM(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "stopFSMEv in unknown",
|
||||
steps: []fsmStepTestValues{
|
||||
sStopFSMEv("unknown", "finished"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "stopFSMEv in waitForPeer",
|
||||
startingHeight: 1,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStopFSMEv("waitForPeer", "finished"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "stopFSMEv in waitForBlock",
|
||||
startingHeight: 1,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sStopFSMEv("waitForBlock", "finished"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, false)
|
||||
}
|
||||
|
||||
func TestFSMUnknownElements(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "unknown event for state unknown",
|
||||
steps: []fsmStepTestValues{
|
||||
sUnknownFSMEv("unknown"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown event for state waitForPeer",
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sUnknownFSMEv("waitForPeer"),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unknown event for state waitForBlock",
|
||||
startingHeight: 1,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sUnknownFSMEv("waitForBlock"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, false)
|
||||
}
|
||||
|
||||
func TestFSMPeerStateTimeoutEvent(t *testing.T) {
|
||||
tests := []testFields{
|
||||
{
|
||||
name: "timeout event for state waitForPeer while in state waitForPeer - TS1",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStateTimeoutEv("waitForPeer", "finished", "waitForPeer", errNoTallerPeer),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "timeout event for state waitForPeer while in a state != waitForPeer",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStateTimeoutEv("waitForPeer", "waitForPeer", "waitForBlock", errTimeoutEventWrongState),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "timeout event for state waitForBlock while in state waitForBlock ",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sStateTimeoutEv("waitForBlock", "waitForPeer", "waitForBlock", errNoPeerResponseForCurrentHeights),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "timeout event for state waitForBlock while in a state != waitForBlock",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForPeer", errTimeoutEventWrongState),
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "timeout event for state waitForBlock with multiple peers",
|
||||
startingHeight: 1,
|
||||
maxRequestsPerPeer: 3,
|
||||
steps: []fsmStepTestValues{
|
||||
sStartFSMEv(),
|
||||
sStatusEv("waitForPeer", "waitForBlock", "P1", 3, nil),
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
sStatusEv("waitForBlock", "waitForBlock", "P2", 3, nil),
|
||||
sStateTimeoutEv("waitForBlock", "waitForBlock", "waitForBlock", errNoPeerResponseForCurrentHeights),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
executeFSMTests(t, tests, false)
|
||||
}
|
||||
|
||||
func makeCorrectTransitionSequence(startingHeight int64, numBlocks int64, numPeers int, randomPeerHeights bool,
|
||||
maxRequestsPerPeer int, maxPendingRequests int) testFields {
|
||||
|
||||
// Generate numPeers peers with random or numBlocks heights according to the randomPeerHeights flag.
|
||||
peerHeights := make([]int64, numPeers)
|
||||
for i := 0; i < numPeers; i++ {
|
||||
if i == 0 {
|
||||
peerHeights[0] = numBlocks
|
||||
continue
|
||||
}
|
||||
if randomPeerHeights {
|
||||
peerHeights[i] = int64(cmn.MaxInt(cmn.RandIntn(int(numBlocks)), int(startingHeight)+1))
|
||||
} else {
|
||||
peerHeights[i] = numBlocks
|
||||
}
|
||||
}
|
||||
|
||||
// Approximate the slice capacity to save time for appends.
|
||||
testSteps := make([]fsmStepTestValues, 0, 3*numBlocks+int64(numPeers))
|
||||
|
||||
testName := fmt.Sprintf("%v-blocks %v-startingHeight %v-peers %v-maxRequestsPerPeer %v-maxNumRequests",
|
||||
numBlocks, startingHeight, numPeers, maxRequestsPerPeer, maxPendingRequests)
|
||||
|
||||
// Add startFSMEv step.
|
||||
testSteps = append(testSteps, sStartFSMEv())
|
||||
|
||||
// For each peer, add statusResponseEv step.
|
||||
for i := 0; i < numPeers; i++ {
|
||||
peerName := fmt.Sprintf("P%d", i)
|
||||
if i == 0 {
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sStatusEv("waitForPeer", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
|
||||
} else {
|
||||
testSteps = append(testSteps,
|
||||
sStatusEv("waitForBlock", "waitForBlock", p2p.ID(peerName), peerHeights[i], nil))
|
||||
}
|
||||
}
|
||||
|
||||
height := startingHeight
|
||||
numBlocksReceived := 0
|
||||
prevBlocks := make([]int64, 0, maxPendingRequests)
|
||||
|
||||
forLoop:
|
||||
for i := 0; i < int(numBlocks); i++ {
|
||||
|
||||
// Add the makeRequestEv step periodically.
|
||||
if i%maxRequestsPerPeer == 0 {
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sMakeRequestsEv("waitForBlock", "waitForBlock", maxNumRequests),
|
||||
)
|
||||
}
|
||||
|
||||
// Add the blockRespEv step
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sBlockRespEv("waitForBlock", "waitForBlock",
|
||||
"P0", height, prevBlocks))
|
||||
prevBlocks = append(prevBlocks, height)
|
||||
height++
|
||||
numBlocksReceived++
|
||||
|
||||
// Add the processedBlockEv step periodically.
|
||||
if numBlocksReceived >= maxRequestsPerPeer || height >= numBlocks {
|
||||
for j := int(height) - numBlocksReceived; j < int(height); j++ {
|
||||
if j >= int(numBlocks) {
|
||||
// This is the last block that is processed, we should be in "finished" state.
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sProcessedBlockEv("waitForBlock", "finished", nil))
|
||||
break forLoop
|
||||
}
|
||||
testSteps = append(
|
||||
testSteps,
|
||||
sProcessedBlockEv("waitForBlock", "waitForBlock", nil))
|
||||
}
|
||||
numBlocksReceived = 0
|
||||
prevBlocks = make([]int64, 0, maxPendingRequests)
|
||||
}
|
||||
}
|
||||
|
||||
return testFields{
|
||||
name: testName,
|
||||
startingHeight: startingHeight,
|
||||
maxRequestsPerPeer: maxRequestsPerPeer,
|
||||
maxPendingRequests: maxPendingRequests,
|
||||
steps: testSteps,
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
maxStartingHeightTest = 100
|
||||
maxRequestsPerPeerTest = 20
|
||||
maxTotalPendingRequestsTest = 600
|
||||
maxNumPeersTest = 1000
|
||||
maxNumBlocksInChainTest = 10000 //should be smaller than 9999999
|
||||
)
|
||||
|
||||
func makeCorrectTransitionSequenceWithRandomParameters() testFields {
|
||||
// Generate a starting height for fast sync.
|
||||
startingHeight := int64(cmn.RandIntn(maxStartingHeightTest) + 1)
|
||||
|
||||
// Generate the number of requests per peer.
|
||||
maxRequestsPerPeer := cmn.RandIntn(maxRequestsPerPeerTest) + 1
|
||||
|
||||
// Generate the maximum number of total pending requests, >= maxRequestsPerPeer.
|
||||
maxPendingRequests := cmn.RandIntn(maxTotalPendingRequestsTest-maxRequestsPerPeer) + maxRequestsPerPeer
|
||||
|
||||
// Generate the number of blocks to be synced.
|
||||
numBlocks := int64(cmn.RandIntn(maxNumBlocksInChainTest)) + startingHeight
|
||||
|
||||
// Generate a number of peers.
|
||||
numPeers := cmn.RandIntn(maxNumPeersTest) + 1
|
||||
|
||||
return makeCorrectTransitionSequence(startingHeight, numBlocks, numPeers, true, maxRequestsPerPeer, maxPendingRequests)
|
||||
}
|
||||
|
||||
func shouldApplyProcessedBlockEvStep(step *fsmStepTestValues, testBcR *testReactor) bool {
|
||||
if step.event == processedBlockEv {
|
||||
_, err := testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height)
|
||||
if err == errMissingBlock {
|
||||
return false
|
||||
}
|
||||
_, err = testBcR.fsm.pool.BlockAndPeerAtHeight(testBcR.fsm.pool.Height + 1)
|
||||
if err == errMissingBlock {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func TestFSMCorrectTransitionSequences(t *testing.T) {
|
||||
|
||||
tests := []testFields{
|
||||
makeCorrectTransitionSequence(1, 100, 10, true, 10, 40),
|
||||
makeCorrectTransitionSequenceWithRandomParameters(),
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create test reactor
|
||||
testBcR := newTestReactor(tt.startingHeight)
|
||||
|
||||
if tt.maxRequestsPerPeer != 0 {
|
||||
maxRequestsPerPeer = tt.maxRequestsPerPeer
|
||||
}
|
||||
|
||||
for _, step := range tt.steps {
|
||||
step := step
|
||||
assert.Equal(t, step.currentState, testBcR.fsm.state.name)
|
||||
|
||||
oldNumStatusRequests := testBcR.numStatusRequests
|
||||
fixBlockResponseEvStep(&step, testBcR)
|
||||
if !shouldApplyProcessedBlockEvStep(&step, testBcR) {
|
||||
continue
|
||||
}
|
||||
|
||||
fsmErr := sendEventToFSM(testBcR.fsm, step.event, step.data)
|
||||
assert.Equal(t, step.wantErr, fsmErr)
|
||||
|
||||
if step.wantStatusReqSent {
|
||||
assert.Equal(t, oldNumStatusRequests+1, testBcR.numStatusRequests)
|
||||
} else {
|
||||
assert.Equal(t, oldNumStatusRequests, testBcR.numStatusRequests)
|
||||
}
|
||||
|
||||
assert.Equal(t, step.wantState, testBcR.fsm.state.name)
|
||||
if step.wantState == "finished" {
|
||||
assert.True(t, testBcR.fsm.isCaughtUp())
|
||||
}
|
||||
}
|
||||
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// ----------------------------------------
|
||||
// implements the bcRNotifier
|
||||
func (testR *testReactor) sendPeerError(err error, peerID p2p.ID) {
|
||||
testR.logger.Info("Reactor received sendPeerError call from FSM", "peer", peerID, "err", err)
|
||||
testR.lastPeerError.peerID = peerID
|
||||
testR.lastPeerError.err = err
|
||||
}
|
||||
|
||||
func (testR *testReactor) sendStatusRequest() {
|
||||
testR.logger.Info("Reactor received sendStatusRequest call from FSM")
|
||||
testR.numStatusRequests++
|
||||
}
|
||||
|
||||
func (testR *testReactor) sendBlockRequest(peerID p2p.ID, height int64) error {
|
||||
testR.logger.Info("Reactor received sendBlockRequest call from FSM", "peer", peerID, "height", height)
|
||||
testR.numBlockRequests++
|
||||
testR.lastBlockRequest.peerID = peerID
|
||||
testR.lastBlockRequest.height = height
|
||||
if height == 9999999 {
|
||||
// simulate switch does not have peer
|
||||
return errNilPeerForBlockRequest
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (testR *testReactor) resetStateTimer(name string, timer **time.Timer, timeout time.Duration) {
|
||||
testR.logger.Info("Reactor received resetStateTimer call from FSM", "state", name, "timeout", timeout)
|
||||
if _, ok := testR.stateTimerStarts[name]; !ok {
|
||||
testR.stateTimerStarts[name] = 1
|
||||
} else {
|
||||
testR.stateTimerStarts[name]++
|
||||
}
|
||||
}
|
||||
|
||||
func (testR *testReactor) switchToConsensus() {
|
||||
}
|
||||
|
||||
// ----------------------------------------
|
||||
@@ -1,417 +0,0 @@
package v1

import (
	"fmt"
	"os"
	"sort"
	"sync"
	"testing"
	"time"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/assert"
	abci "github.com/tendermint/tendermint/abci/types"
	cfg "github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/mock"
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/proxy"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"
	"github.com/tendermint/tendermint/types"
	tmtime "github.com/tendermint/tendermint/types/time"
	dbm "github.com/tendermint/tm-db"
)

var config *cfg.Config

func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) {
	validators := make([]types.GenesisValidator, numValidators)
	privValidators := make([]types.PrivValidator, numValidators)
	for i := 0; i < numValidators; i++ {
		val, privVal := types.RandValidator(randPower, minPower)
		validators[i] = types.GenesisValidator{
			PubKey: val.PubKey,
			Power:  val.VotingPower,
		}
		privValidators[i] = privVal
	}
	sort.Sort(types.PrivValidatorsByAddress(privValidators))

	return &types.GenesisDoc{
		GenesisTime: tmtime.Now(),
		ChainID:     config.ChainID(),
		Validators:  validators,
	}, privValidators
}

func makeVote(header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote {
	addr := privVal.GetPubKey().Address()
	idx, _ := valset.GetByAddress(addr)
	vote := &types.Vote{
		ValidatorAddress: addr,
		ValidatorIndex:   idx,
		Height:           header.Height,
		Round:            1,
		Timestamp:        tmtime.Now(),
		Type:             types.PrecommitType,
		BlockID:          blockID,
	}

	_ = privVal.SignVote(header.ChainID, vote)

	return vote
}

type BlockchainReactorPair struct {
	bcR  *BlockchainReactor
	conR *consensusReactorTest
}

func newBlockchainReactor(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) *BlockchainReactor {
	if len(privVals) != 1 {
		panic("only supports one validator")
	}

	app := &testApp{}
	cc := proxy.NewLocalClientCreator(app)
	proxyApp := proxy.NewAppConns(cc)
	err := proxyApp.Start()
	if err != nil {
		panic(errors.Wrap(err, "error starting app"))
	}

	blockDB := dbm.NewMemDB()
	stateDB := dbm.NewMemDB()
	blockStore := store.NewBlockStore(blockDB)

	state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc)
	if err != nil {
		panic(errors.Wrap(err, "error constructing state from genesis file"))
	}

	// Make the BlockchainReactor itself.
	// NOTE we have to create and commit the blocks first because
	// pool.height is determined from the store.
	fastSync := true
	db := dbm.NewMemDB()
	blockExec := sm.NewBlockExecutor(db, log.TestingLogger(), proxyApp.Consensus(),
		mock.Mempool{}, sm.MockEvidencePool{})
	sm.SaveState(db, state)

	// let's add some blocks in
	for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
		lastCommit := types.NewCommit(types.BlockID{}, nil)
		if blockHeight > 1 {
			lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
			lastBlock := blockStore.LoadBlock(blockHeight - 1)

			vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]).CommitSig()
			lastCommit = types.NewCommit(lastBlockMeta.BlockID, []*types.CommitSig{vote})
		}

		thisBlock := makeBlock(blockHeight, state, lastCommit)

		thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
		blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()}

		state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
		if err != nil {
			panic(errors.Wrap(err, "error applying block"))
		}

		blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
	}

	bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
	bcReactor.SetLogger(logger.With("module", "blockchain"))

	return bcReactor
}

func newBlockchainReactorPair(logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, maxBlockHeight int64) BlockchainReactorPair {

	consensusReactor := &consensusReactorTest{}
	consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor)

	return BlockchainReactorPair{
		newBlockchainReactor(logger, genDoc, privVals, maxBlockHeight),
		consensusReactor}
}

type consensusReactorTest struct {
	p2p.BaseReactor     // BaseService + p2p.Switch
	switchedToConsensus bool
	mtx                 sync.Mutex
}

func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced int) {
	conR.mtx.Lock()
	defer conR.mtx.Unlock()
	conR.switchedToConsensus = true
}

func TestFastSyncNoBlockResponse(t *testing.T) {

	config = cfg.ResetTestRoot("blockchain_new_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	maxBlockHeight := int64(65)

	reactorPairs := make([]BlockchainReactorPair, 2)

	logger := log.TestingLogger()
	reactorPairs[0] = newBlockchainReactorPair(logger, genDoc, privVals, maxBlockHeight)
	reactorPairs[1] = newBlockchainReactorPair(logger, genDoc, privVals, 0)

	p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
		moduleName := fmt.Sprintf("blockchain-%v", i)
		reactorPairs[i].bcR.SetLogger(logger.With("module", moduleName))

		return s

	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			_ = r.bcR.Stop()
			_ = r.conR.Stop()
		}
	}()

	tests := []struct {
		height   int64
		existent bool
	}{
		{maxBlockHeight + 2, false},
		{10, true},
		{1, true},
		{maxBlockHeight + 100, false},
	}

	for {
		time.Sleep(10 * time.Millisecond)
		reactorPairs[1].conR.mtx.Lock()
		if reactorPairs[1].conR.switchedToConsensus {
			reactorPairs[1].conR.mtx.Unlock()
			break
		}
		reactorPairs[1].conR.mtx.Unlock()
	}

	assert.Equal(t, maxBlockHeight, reactorPairs[0].bcR.store.Height())

	for _, tt := range tests {
		block := reactorPairs[1].bcR.store.LoadBlock(tt.height)
		if tt.existent {
			assert.True(t, block != nil)
		} else {
			assert.True(t, block == nil)
		}
	}
}

// NOTE: This is too hard to test without
// an easy way to add a test peer to the switch
// or without significant refactoring of the module.
// Alternatively we could actually dial a TCP conn but
// that seems extreme.
func TestFastSyncBadBlockStopsPeer(t *testing.T) {
	numNodes := 4
	maxBlockHeight := int64(148)

	config = cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)
	genDoc, privVals := randGenesisDoc(1, false, 30)

	otherChain := newBlockchainReactorPair(log.TestingLogger(), genDoc, privVals, maxBlockHeight)
	defer func() {
		_ = otherChain.bcR.Stop()
		_ = otherChain.conR.Stop()
	}()

	reactorPairs := make([]BlockchainReactorPair, numNodes)
	logger := make([]log.Logger, numNodes)

	for i := 0; i < numNodes; i++ {
		logger[i] = log.TestingLogger()
		height := int64(0)
		if i == 0 {
			height = maxBlockHeight
		}
		reactorPairs[i] = newBlockchainReactorPair(logger[i], genDoc, privVals, height)
	}

	switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch {
		reactorPairs[i].conR.mtx.Lock()
		s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[i].conR)
		moduleName := fmt.Sprintf("blockchain-%v", i)
		reactorPairs[i].bcR.SetLogger(logger[i].With("module", moduleName))
		reactorPairs[i].conR.mtx.Unlock()
		return s

	}, p2p.Connect2Switches)

	defer func() {
		for _, r := range reactorPairs {
			_ = r.bcR.Stop()
			_ = r.conR.Stop()
		}
	}()

outerFor:
	for {
		time.Sleep(10 * time.Millisecond)
		for i := 0; i < numNodes; i++ {
			reactorPairs[i].conR.mtx.Lock()
			if !reactorPairs[i].conR.switchedToConsensus {
				reactorPairs[i].conR.mtx.Unlock()
				continue outerFor
			}
			reactorPairs[i].conR.mtx.Unlock()
		}
		break
	}

	// at this time, reactors[0-3] are the newest
	assert.Equal(t, numNodes-1, reactorPairs[1].bcR.Switch.Peers().Size())

	// mark the last reactorPair as an invalid peer
	reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store

	lastLogger := log.TestingLogger()
	lastReactorPair := newBlockchainReactorPair(lastLogger, genDoc, privVals, 0)
	reactorPairs = append(reactorPairs, lastReactorPair)

	switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch {
		s.AddReactor("BLOCKCHAIN", reactorPairs[len(reactorPairs)-1].bcR)
		s.AddReactor("CONSENSUS", reactorPairs[len(reactorPairs)-1].conR)
		moduleName := fmt.Sprintf("blockchain-%v", len(reactorPairs)-1)
		reactorPairs[len(reactorPairs)-1].bcR.SetLogger(lastLogger.With("module", moduleName))
		return s

	}, p2p.Connect2Switches)...)

	for i := 0; i < len(reactorPairs)-1; i++ {
		p2p.Connect2Switches(switches, i, len(reactorPairs)-1)
	}

	for {
		time.Sleep(1 * time.Second)
		lastReactorPair.conR.mtx.Lock()
		if lastReactorPair.conR.switchedToConsensus {
			lastReactorPair.conR.mtx.Unlock()
			break
		}
		lastReactorPair.conR.mtx.Unlock()

		if lastReactorPair.bcR.Switch.Peers().Size() == 0 {
			break
		}
	}

	assert.True(t, lastReactorPair.bcR.Switch.Peers().Size() < len(reactorPairs)-1)
}

func TestBcBlockRequestMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName      string
		requestHeight int64
		expectErr     bool
	}{
		{"Valid Request Message", 0, false},
		{"Valid Request Message", 1, false},
		{"Invalid Request Message", -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			request := bcBlockRequestMessage{Height: tc.requestHeight}
			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcNoBlockResponseMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName          string
		nonResponseHeight int64
		expectErr         bool
	}{
		{"Valid Non-Response Message", 0, false},
		{"Valid Non-Response Message", 1, false},
		{"Invalid Non-Response Message", -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			nonResponse := bcNoBlockResponseMessage{Height: tc.nonResponseHeight}
			assert.Equal(t, tc.expectErr, nonResponse.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcStatusRequestMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName      string
		requestHeight int64
		expectErr     bool
	}{
		{"Valid Request Message", 0, false},
		{"Valid Request Message", 1, false},
		{"Invalid Request Message", -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			request := bcStatusRequestMessage{Height: tc.requestHeight}
			assert.Equal(t, tc.expectErr, request.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

func TestBcStatusResponseMessageValidateBasic(t *testing.T) {
	testCases := []struct {
		testName       string
		responseHeight int64
		expectErr      bool
	}{
		{"Valid Response Message", 0, false},
		{"Valid Response Message", 1, false},
		{"Invalid Response Message", -1, true},
	}

	for _, tc := range testCases {
		tc := tc
		t.Run(tc.testName, func(t *testing.T) {
			response := bcStatusResponseMessage{Height: tc.responseHeight}
			assert.Equal(t, tc.expectErr, response.ValidateBasic() != nil, "Validate Basic had an unexpected result")
		})
	}
}

//----------------------------------------------
// utility funcs

func makeTxs(height int64) (txs []types.Tx) {
	for i := 0; i < 10; i++ {
		txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
	}
	return txs
}

func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
	block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
	return block
}

type testApp struct {
	abci.BaseApplication
}
@@ -1,124 +0,0 @@
package v2

import (
	"github.com/go-kit/kit/metrics"
	"github.com/go-kit/kit/metrics/discard"
	"github.com/go-kit/kit/metrics/prometheus"
	stdprometheus "github.com/prometheus/client_golang/prometheus"
)

const (
	// MetricsSubsystem is a subsystem shared by all metrics exposed by this
	// package.
	MetricsSubsystem = "blockchain"
)

// Metrics contains metrics exposed by this package.
type Metrics struct {
	// events_in
	EventsIn metrics.Counter
	// events_handled
	EventsHandled metrics.Counter
	// events_out
	EventsOut metrics.Counter
	// errors_in
	ErrorsIn metrics.Counter
	// errors_handled
	ErrorsHandled metrics.Counter
	// errors_out
	ErrorsOut metrics.Counter
	// events_shed
	EventsShed metrics.Counter
	// events_sent
	EventsSent metrics.Counter
	// errors_sent
	ErrorsSent metrics.Counter
	// errors_shed
	ErrorsShed metrics.Counter
}

// Can we burn in the routine name here?
func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
	labels := []string{}
	for i := 0; i < len(labelsAndValues); i += 2 {
		labels = append(labels, labelsAndValues[i])
	}
	return &Metrics{
		EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_in",
			Help:      "Events read from the channel.",
		}, labels).With(labelsAndValues...),
		EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_handled",
			Help:      "Events handled.",
		}, labels).With(labelsAndValues...),
		EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_out",
			Help:      "Events output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_in",
			Help:      "Errors read from the channel.",
		}, labels).With(labelsAndValues...),
		ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_handled",
			Help:      "Errors handled.",
		}, labels).With(labelsAndValues...),
		ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_out",
			Help:      "Errors output from routine.",
		}, labels).With(labelsAndValues...),
		ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_sent",
			Help:      "Errors sent to routine.",
		}, labels).With(labelsAndValues...),
		ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "errors_shed",
			Help:      "Errors dropped from sending.",
		}, labels).With(labelsAndValues...),
		EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_sent",
			Help:      "Events sent to routine.",
		}, labels).With(labelsAndValues...),
		EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{
			Namespace: namespace,
			Subsystem: MetricsSubsystem,
			Name:      "events_shed",
			Help:      "Events dropped from sending.",
		}, labels).With(labelsAndValues...),
	}
}
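
// Usage sketch (illustrative only, not part of the original file):
// labelsAndValues alternates label names and values; the names at even
// indices become the declared Prometheus label set and the pairs are bound
// immediately via With. Assuming a "routine" label:
//
//	m := PrometheusMetrics("tendermint", "routine", "scheduler")
//	m.EventsIn.Add(1) // tendermint_blockchain_events_in{routine="scheduler"}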

// NopMetrics returns no-op Metrics.
func NopMetrics() *Metrics {
	return &Metrics{
		EventsIn:      discard.NewCounter(),
		EventsHandled: discard.NewCounter(),
		EventsOut:     discard.NewCounter(),
		ErrorsIn:      discard.NewCounter(),
		ErrorsHandled: discard.NewCounter(),
		ErrorsOut:     discard.NewCounter(),
		EventsShed:    discard.NewCounter(),
		EventsSent:    discard.NewCounter(),
		ErrorsSent:    discard.NewCounter(),
		ErrorsShed:    discard.NewCounter(),
	}
}
@@ -1,117 +0,0 @@
package v2

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/libs/log"
)

type timeCheck struct {
	priorityHigh
	time time.Time
}

func schedulerHandle(event Event) (Event, error) {
	if _, ok := event.(timeCheck); ok {
		fmt.Println("scheduler handle timeCheck")
	}
	return noOp, nil
}

func processorHandle(event Event) (Event, error) {
	if _, ok := event.(timeCheck); ok {
		fmt.Println("processor handle timeCheck")
	}
	return noOp, nil
}

type Reactor struct {
	events    chan Event
	stopDemux chan struct{}
	scheduler *Routine
	processor *Routine
	ticker    *time.Ticker
	logger    log.Logger
}

func NewReactor(bufferSize int) *Reactor {
	return &Reactor{
		events:    make(chan Event, bufferSize),
		stopDemux: make(chan struct{}),
		scheduler: newRoutine("scheduler", schedulerHandle, bufferSize),
		processor: newRoutine("processor", processorHandle, bufferSize),
		ticker:    time.NewTicker(1 * time.Second),
		logger:    log.NewNopLogger(),
	}
}
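
// Lifecycle sketch (illustrative only, mirroring TestReactor in this change):
// Start launches the scheduler, processor, and demux loops; Receive feeds
// events into the demuxer; Stop tears everything down.
//
//	r := NewReactor(10)
//	r.Start()
//	r.Receive(event) // for each incoming Event
//	r.Stop()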

// nolint:unused
func (r *Reactor) setLogger(logger log.Logger) {
	r.logger = logger
	r.scheduler.setLogger(logger)
	r.processor.setLogger(logger)
}

func (r *Reactor) Start() {
	go r.scheduler.start()
	go r.processor.start()
	go r.demux()

	<-r.scheduler.ready()
	<-r.processor.ready()

	go func() {
		for t := range r.ticker.C {
			r.events <- timeCheck{time: t}
		}
	}()
}

// XXX: Would it be possible here to provide some kind of type safety for the types
// of events that each routine can produce and consume?
func (r *Reactor) demux() {
	for {
		select {
		case event := <-r.events:
			// XXX: check for backpressure
			r.scheduler.send(event)
			r.processor.send(event)
		case <-r.stopDemux:
			r.logger.Info("demuxing stopped")
			return
		case event := <-r.scheduler.next():
			r.processor.send(event)
		case event := <-r.processor.next():
			r.scheduler.send(event)
		case err := <-r.scheduler.final():
			r.logger.Info(fmt.Sprintf("scheduler final %s", err))
		case err := <-r.processor.final():
			r.logger.Info(fmt.Sprintf("processor final %s", err))
			// XXX: switch to consensus
		}
	}
}

func (r *Reactor) Stop() {
	r.logger.Info("reactor stopping")

	r.ticker.Stop()
	r.scheduler.stop()
	r.processor.stop()
	close(r.stopDemux)
	close(r.events)

	r.logger.Info("reactor stopped")
}

func (r *Reactor) Receive(event Event) {
	// XXX: decode and serialize write events
	// TODO: backpressure
	r.events <- event
}

func (r *Reactor) AddPeer() {
	// TODO: add peer event and send to demuxer
}
@@ -1,22 +0,0 @@
package v2

import (
	"testing"
)

func TestReactor(t *testing.T) {
	var (
		bufferSize = 10
		reactor    = NewReactor(bufferSize)
	)

	reactor.Start()
	script := []Event{
		// TODO
	}

	for _, event := range script {
		reactor.Receive(event)
	}
	reactor.Stop()
}
@@ -1,134 +0,0 @@
package v2

import (
	"fmt"
	"sync/atomic"

	"github.com/Workiva/go-datastructures/queue"
	"github.com/tendermint/tendermint/libs/log"
)

type handleFunc = func(event Event) (Event, error)

// A Routine models a finite state machine as a serialized stream of events
// processed by a handle function. The Routine structure handles the
// concurrency and messaging guarantees. Events sent via `send` are handled
// by the `handle` function to produce an iterator `next()`. Calling `stop()`
// on a routine will conclude processing of all sent events and produce the
// `final()` event representing the terminal state.
type Routine struct {
	name    string
	handle  handleFunc
	queue   *queue.PriorityQueue
	out     chan Event
	fin     chan error
	rdy     chan struct{}
	running *uint32
	logger  log.Logger
	metrics *Metrics
}

func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine {
	return &Routine{
		name:    name,
		handle:  handleFunc,
		queue:   queue.NewPriorityQueue(bufferSize, true),
		out:     make(chan Event, bufferSize),
		rdy:     make(chan struct{}, 1),
		fin:     make(chan error, 1),
		running: new(uint32),
		logger:  log.NewNopLogger(),
		metrics: NopMetrics(),
	}
}
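
// Usage sketch (illustrative only; simpleHandler and eventA are defined in
// routine_test.go of this change): a routine runs in its own goroutine,
// accepts events once ready, and reports its terminal error on final().
//
//	rt := newRoutine("example", simpleHandler, 10)
//	go rt.start()
//	<-rt.ready()
//	rt.send(eventA{})
//	err := <-rt.final() // "done" once simpleHandler sees eventA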

// nolint:unused
func (rt *Routine) setLogger(logger log.Logger) {
	rt.logger = logger
}

// nolint:unused
func (rt *Routine) setMetrics(metrics *Metrics) {
	rt.metrics = metrics
}

func (rt *Routine) start() {
	rt.logger.Info(fmt.Sprintf("%s: run\n", rt.name))
	running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1))
	if !running {
		panic(fmt.Sprintf("%s is already running", rt.name))
	}
	close(rt.rdy)
	defer func() {
		stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0))
		if !stopped {
			panic(fmt.Sprintf("%s failed to stop", rt.name))
		}
	}()

	for {
		events, err := rt.queue.Get(1)
		if err != nil {
			rt.logger.Info(fmt.Sprintf("%s: stopping\n", rt.name))
			rt.terminate(fmt.Errorf("stopped"))
			return
		}
		oEvent, err := rt.handle(events[0].(Event))
		rt.metrics.EventsHandled.With("routine", rt.name).Add(1)
		if err != nil {
			rt.terminate(err)
			return
		}
		rt.metrics.EventsOut.With("routine", rt.name).Add(1)
		rt.logger.Debug(fmt.Sprintf("%s produced %T %+v\n", rt.name, oEvent, oEvent))

		rt.out <- oEvent
	}
}

// XXX: look into returning OpError in the net package
func (rt *Routine) send(event Event) bool {
	rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event))
	if !rt.isRunning() {
		return false
	}
	err := rt.queue.Put(event)
	if err != nil {
		rt.metrics.EventsShed.With("routine", rt.name).Add(1)
		rt.logger.Info(fmt.Sprintf("%s: send failed, queue was full/stopped\n", rt.name))
		return false
	}
	rt.metrics.EventsSent.With("routine", rt.name).Add(1)
	return true
}

func (rt *Routine) isRunning() bool {
	return atomic.LoadUint32(rt.running) == 1
}

func (rt *Routine) next() chan Event {
	return rt.out
}

func (rt *Routine) ready() chan struct{} {
	return rt.rdy
}

func (rt *Routine) stop() {
	if !rt.isRunning() {
		return
	}

	rt.logger.Info(fmt.Sprintf("%s: stop\n", rt.name))
	rt.queue.Dispose() // this should block until all queue items are free?
}

func (rt *Routine) final() chan error {
	return rt.fin
}

// XXX: Maybe get rid of this
func (rt *Routine) terminate(reason error) {
	close(rt.out)
	rt.fin <- reason
}
@@ -1,163 +0,0 @@
package v2

import (
	"fmt"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

type eventA struct {
	priorityNormal
}

var done = fmt.Errorf("done")

func simpleHandler(event Event) (Event, error) {
	if _, ok := event.(eventA); ok {
		return noOp, done
	}
	return noOp, nil
}

func TestRoutineFinal(t *testing.T) {
	var (
		bufferSize = 10
		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
	)

	assert.False(t, routine.isRunning(),
		"expected an initialized routine to not be running")
	go routine.start()
	<-routine.ready()
	assert.True(t, routine.isRunning(),
		"expected a started routine")

	assert.True(t, routine.send(eventA{}),
		"expected sending to a ready routine to succeed")

	assert.Equal(t, done, <-routine.final(),
		"expected the final event to be done")

	assert.False(t, routine.isRunning(),
		"expected a completed routine to no longer be running")
}

func TestRoutineStop(t *testing.T) {
	var (
		bufferSize = 10
		routine    = newRoutine("simpleRoutine", simpleHandler, bufferSize)
	)

	assert.False(t, routine.send(eventA{}),
		"expected sending to an unstarted routine to fail")

	go routine.start()
	<-routine.ready()

	assert.True(t, routine.send(eventA{}),
		"expected sending to a running routine to succeed")

	routine.stop()

	assert.False(t, routine.send(eventA{}),
		"expected sending to a stopped routine to fail")
}

type finalCount struct {
	count int
}

func (f finalCount) Error() string {
	return "end"
}

func genStatefulHandler(maxCount int) handleFunc {
	counter := 0
	return func(event Event) (Event, error) {
		if _, ok := event.(eventA); ok {
			counter++
			if counter >= maxCount {
				return noOp, finalCount{counter}
			}

			return eventA{}, nil
		}
		return noOp, nil
	}
}

func feedback(r *Routine) {
	for event := range r.next() {
		r.send(event)
	}
}

func TestStatefulRoutine(t *testing.T) {
	var (
		count      = 10
		handler    = genStatefulHandler(count)
		bufferSize = 20
		routine    = newRoutine("statefulRoutine", handler, bufferSize)
	)

	go routine.start()
	go feedback(routine)
	<-routine.ready()

	assert.True(t, routine.send(eventA{}),
		"expected sending to a started routine to succeed")

	final := <-routine.final()
	if fnl, ok := final.(finalCount); ok {
		assert.Equal(t, count, fnl.count,
			"expected the routine to count to 10")
	} else {
		t.Fail()
	}
}

type lowPriorityEvent struct {
	priorityLow
}

type highPriorityEvent struct {
	priorityHigh
}

func handleWithPriority(event Event) (Event, error) {
	switch event.(type) {
	case lowPriorityEvent:
		return noOp, nil
	case highPriorityEvent:
		return noOp, done
	}
	return noOp, nil
}

func TestPriority(t *testing.T) {
	var (
		bufferSize = 20
		routine    = newRoutine("priorityRoutine", handleWithPriority, bufferSize)
	)

	go routine.start()
	<-routine.ready()
	go func() {
		for {
			routine.send(lowPriorityEvent{})
			time.Sleep(1 * time.Millisecond)
		}
	}()
	time.Sleep(10 * time.Millisecond)

	assert.True(t, routine.isRunning(),
		"expected a started routine")
	assert.True(t, routine.send(highPriorityEvent{}),
		"expected send to succeed even when saturated")

	assert.Equal(t, done, <-routine.final())
	assert.False(t, routine.isRunning(),
		"expected the routine to have stopped")
}
@@ -1,385 +0,0 @@
// nolint:unused
package v2

import (
	"fmt"
	"math"
	"math/rand"
	"time"

	"github.com/tendermint/tendermint/p2p"
)

type blockState int

const (
	blockStateUnknown blockState = iota
	blockStateNew
	blockStatePending
	blockStateReceived
	blockStateProcessed
)

func (e blockState) String() string {
	switch e {
	case blockStateUnknown:
		return "Unknown"
	case blockStateNew:
		return "New"
	case blockStatePending:
		return "Pending"
	case blockStateReceived:
		return "Received"
	case blockStateProcessed:
		return "Processed"
	default:
		return fmt.Sprintf("unknown blockState: %d", e)
	}
}

type peerState int

const (
	peerStateNew = iota
	peerStateReady
	peerStateRemoved
)

func (e peerState) String() string {
	switch e {
	case peerStateNew:
		return "New"
	case peerStateReady:
		return "Ready"
	case peerStateRemoved:
		return "Removed"
	default:
		return fmt.Sprintf("unknown peerState: %d", e)
	}
}

type scPeer struct {
	peerID      p2p.ID
	state       peerState
	height      int64
	lastTouched time.Time
	lastRate    int64
}

func newScPeer(peerID p2p.ID) *scPeer {
	return &scPeer{
		peerID:      peerID,
		state:       peerStateNew,
		height:      -1,
		lastTouched: time.Time{},
	}
}

// The schedule is a composite data structure which allows a scheduler to keep
// track of which blocks have been scheduled into which state.
type schedule struct {
	initHeight int64
	// the blockState of each block, keyed by height
	blockStates map[int64]blockState

	// a map of peerID to schedule specific peer struct `scPeer` used to keep
	// track of peer specific state
	peers map[p2p.ID]*scPeer

	// a map of heights to the peer we are waiting for a response from
	pendingBlocks map[int64]p2p.ID

	// the time at which a block was put in blockStatePending
	pendingTime map[int64]time.Time

	// the peerID of the peer which put the block in blockStateReceived
	receivedBlocks map[int64]p2p.ID
}

func newSchedule(initHeight int64) *schedule {
	sc := schedule{
		initHeight:     initHeight,
		blockStates:    make(map[int64]blockState),
		peers:          make(map[p2p.ID]*scPeer),
		pendingBlocks:  make(map[int64]p2p.ID),
		pendingTime:    make(map[int64]time.Time),
		receivedBlocks: make(map[int64]p2p.ID),
	}

	sc.setStateAtHeight(initHeight, blockStateNew)

	return &sc
}
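
// Lifecycle sketch (illustrative only): a block moves New -> Pending ->
// Received -> Processed as it is requested from, delivered by, and then
// verified against a ready peer, e.g.:
//
//	sc := newSchedule(1)
//	_ = sc.addPeer("P1")
//	_ = sc.setPeerHeight("P1", 10)
//	_ = sc.markPending("P1", 1, time.Now())
//	_ = sc.markReceived("P1", 1, 512, time.Now().Add(time.Second))
//	_ = sc.markProcessed(1)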
|
||||
|
||||
func (sc *schedule) addPeer(peerID p2p.ID) error {
|
||||
if _, ok := sc.peers[peerID]; ok {
|
||||
return fmt.Errorf("Cannot add duplicate peer %s", peerID)
|
||||
}
|
||||
sc.peers[peerID] = newScPeer(peerID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sc *schedule) touchPeer(peerID p2p.ID, time time.Time) error {
|
||||
peer, ok := sc.peers[peerID]
|
||||
if !ok {
|
||||
return fmt.Errorf("Couldn't find peer %s", peerID)
|
||||
}
|
||||
|
||||
if peer.state == peerStateRemoved {
|
||||
return fmt.Errorf("Tried to touch peer in peerStateRemoved")
|
||||
}
|
||||
|
||||
peer.lastTouched = time
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (sc *schedule) removePeer(peerID p2p.ID) error {
|
||||
peer, ok := sc.peers[peerID]
|
||||
if !ok {
|
||||
return fmt.Errorf("Couldn't find peer %s", peerID)
|
||||
}
|
||||
|
||||
if peer.state == peerStateRemoved {
|
||||
return fmt.Errorf("Tried to remove peer %s in peerStateRemoved", peerID)
|
||||
}
|
||||
|
||||
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.pendingTime, height)
			delete(sc.pendingBlocks, height)
		}
	}

	for height, rcvPeerID := range sc.receivedBlocks {
		if rcvPeerID == peerID {
			sc.setStateAtHeight(height, blockStateNew)
			delete(sc.receivedBlocks, height)
		}
	}

	peer.state = peerStateRemoved

	return nil
}

func (sc *schedule) setPeerHeight(peerID p2p.ID, height int64) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("Can't find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("Cannot set peer height for a peer in peerStateRemoved")
	}

	if height < peer.height {
		return fmt.Errorf("Cannot move peer height lower. from %d to %d", peer.height, height)
	}

	peer.height = height
	peer.state = peerStateReady
	for i := sc.minHeight(); i <= height; i++ {
		if sc.getStateAtHeight(i) == blockStateUnknown {
			sc.setStateAtHeight(i, blockStateNew)
		}
	}

	return nil
}

func (sc *schedule) getStateAtHeight(height int64) blockState {
	if height < sc.initHeight {
		return blockStateProcessed
	} else if state, ok := sc.blockStates[height]; ok {
		return state
	} else {
		return blockStateUnknown
	}
}

func (sc *schedule) getPeersAtHeight(height int64) []*scPeer {
	peers := []*scPeer{}
	for _, peer := range sc.peers {
		if peer.height >= height {
			peers = append(peers, peer)
		}
	}

	return peers
}

func (sc *schedule) peersInactiveSince(duration time.Duration, now time.Time) []p2p.ID {
	peers := []p2p.ID{}
	for _, peer := range sc.peers {
		if now.Sub(peer.lastTouched) > duration {
			peers = append(peers, peer.peerID)
		}
	}

	return peers
}

func (sc *schedule) peersSlowerThan(minSpeed int64) []p2p.ID {
	peers := []p2p.ID{}
	for _, peer := range sc.peers {
		if peer.lastRate < minSpeed {
			peers = append(peers, peer.peerID)
		}
	}

	return peers
}

func (sc *schedule) setStateAtHeight(height int64, state blockState) {
	sc.blockStates[height] = state
}

func (sc *schedule) markReceived(peerID p2p.ID, height int64, size int64, now time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("Can't find peer %s", peerID)
	}

	if peer.state == peerStateRemoved {
		return fmt.Errorf("Cannot receive blocks from removed peer %s", peerID)
	}

	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
		return fmt.Errorf("Received block %d from peer %s without being requested", height, peerID)
	}

	pendingTime, ok := sc.pendingTime[height]
	if !ok || now.Sub(pendingTime) <= 0 {
		return fmt.Errorf("Clock error. Block %d received at %s but requested at %s",
			height, now, pendingTime)
	}

	peer.lastRate = size / int64(now.Sub(pendingTime).Seconds())

	sc.setStateAtHeight(height, blockStateReceived)
	delete(sc.pendingBlocks, height)
	delete(sc.pendingTime, height)

	sc.receivedBlocks[height] = peerID

	return nil
}

func (sc *schedule) markPending(peerID p2p.ID, height int64, time time.Time) error {
	peer, ok := sc.peers[peerID]
	if !ok {
		return fmt.Errorf("Can't find peer %s", peerID)
	}

	state := sc.getStateAtHeight(height)
	if state != blockStateNew {
		return fmt.Errorf("Block %d should be in blockStateNew but was %s", height, state)
	}

	if peer.state != peerStateReady {
		return fmt.Errorf("Cannot schedule %d from %s in %s", height, peerID, peer.state)
	}

	if height > peer.height {
		return fmt.Errorf("Cannot request height %d from peer %s who is at height %d",
			height, peerID, peer.height)
	}

	sc.setStateAtHeight(height, blockStatePending)
	sc.pendingBlocks[height] = peerID
	// XXX: to make this more accurate we can introduce a message from
	// the IO routine which indicates the time the request was put on the wire
	sc.pendingTime[height] = time

	return nil
}

func (sc *schedule) markProcessed(height int64) error {
	state := sc.getStateAtHeight(height)
	if state != blockStateReceived {
		return fmt.Errorf("Can't mark height %d processed from block state %s", height, state)
	}

	delete(sc.receivedBlocks, height)

	sc.setStateAtHeight(height, blockStateProcessed)

	return nil
}

// allBlocksProcessed returns true if all blocks are in blockStateProcessed,
// i.e. the schedule has been completed
func (sc *schedule) allBlocksProcessed() bool {
	for _, state := range sc.blockStates {
		if state != blockStateProcessed {
			return false
		}
	}
	return true
}

// highest block | state == blockStateNew
func (sc *schedule) maxHeight() int64 {
	var max int64 = 0
	for height, state := range sc.blockStates {
		if state == blockStateNew && height > max {
			max = height
		}
	}

	return max
}

// lowest block | state == blockStateNew
func (sc *schedule) minHeight() int64 {
	var min int64 = math.MaxInt64
	for height, state := range sc.blockStates {
		if state == blockStateNew && height < min {
			min = height
		}
	}

	return min
}

func (sc *schedule) pendingFrom(peerID p2p.ID) []int64 {
	heights := []int64{}
	for height, pendingPeerID := range sc.pendingBlocks {
		if pendingPeerID == peerID {
			heights = append(heights, height)
		}
	}
	return heights
}

func (sc *schedule) selectPeer(peers []*scPeer) *scPeer {
	// FIXME: properPeerSelector
	s := rand.NewSource(time.Now().Unix())
	r := rand.New(s)

	return peers[r.Intn(len(peers))]
}
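// A note on the FIXME above (editorial sketch, not part of this changeset):
// reseeding from the wall clock on every call gives poorly distributed picks
// under rapid calls, and a uniform pick ignores peer quality. One alternative,
// with the rate-weighting scheme being an assumption, would seed once and
// bias selection by observed download rate:
//
//	var selectRand = rand.New(rand.NewSource(time.Now().UnixNano()))
//
//	func selectPeerByRate(peers []*scPeer) *scPeer {
//		var total int64
//		for _, p := range peers {
//			total += p.lastRate + 1 // +1 keeps brand-new peers eligible
//		}
//		pick := selectRand.Int63n(total)
//		for _, p := range peers {
//			pick -= p.lastRate + 1
//			if pick < 0 {
//				return p
//			}
//		}
//		return peers[len(peers)-1] // unreachable when total is computed as above
//	}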
// XXX: this duplicates the logic of peersInactiveSince and peersSlowerThan
func (sc *schedule) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []p2p.ID {
	prunable := []p2p.ID{}
	for peerID, peer := range sc.peers {
		if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate {
			prunable = append(prunable, peerID)
		}
	}

	return prunable
}

func (sc *schedule) numBlockInState(targetState blockState) uint32 {
	var num uint32 = 0
	for _, state := range sc.blockStates {
		if state == targetState {
			num++
		}
	}
	return num
}
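// Taken together, the methods above implement a per-height state machine:
// Unknown -> New (a peer announces the height) -> Pending (markPending) ->
// Received (markReceived) -> Processed (markProcessed), with removePeer
// resetting that peer's Pending/Received heights back to New. A minimal
// happy-path walkthrough, assuming the newSchedule constructor exercised by
// the tests below; note that markReceived computes lastRate with whole-second
// truncation, so the receive time must be at least a second after the request:
//
//	sc := newSchedule(5)                       // heights below 5 are Processed
//	_ = sc.addPeer("peer1")
//	_ = sc.setPeerHeight("peer1", 10)          // heights 5..10 become New
//	_ = sc.markPending("peer1", 5, time.Now()) // height 5 is Pending on peer1
//	_ = sc.markReceived("peer1", 5, 1024, time.Now().Add(time.Second))
//	_ = sc.markProcessed(5)                    // height 5 is Processed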
@@ -1,272 +0,0 @@
package v2

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/tendermint/tendermint/p2p"
)

func TestScheduleInit(t *testing.T) {
	var (
		initHeight int64 = 5
		sc               = newSchedule(initHeight)
	)

	assert.Equal(t, blockStateNew, sc.getStateAtHeight(initHeight))
	assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1))
	assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1))
}

func TestAddPeer(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerIDTwo  p2p.ID = "2"
		sc                = newSchedule(initHeight)
	)

	assert.Nil(t, sc.addPeer(peerID))
	assert.Nil(t, sc.addPeer(peerIDTwo))
	assert.Error(t, sc.addPeer(peerID))
}

func TestTouchPeer(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		sc                = newSchedule(initHeight)
		now               = time.Now()
	)

	assert.Error(t, sc.touchPeer(peerID, now),
		"Touching an unknown peer should return errPeerNotFound")

	assert.Nil(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.Nil(t, sc.touchPeer(peerID, now),
		"Touching a peer should return no error")

	threshold := 10 * time.Second
	assert.Empty(t, sc.peersInactiveSince(threshold, now.Add(9*time.Second)),
		"Expected no peers to have been inactive for over 9 seconds")
	assert.Containsf(t, sc.peersInactiveSince(threshold, now.Add(11*time.Second)), peerID,
		"Expected %s to have been inactive for over 10 seconds", peerID)
}

func TestPeerHeight(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerHeight int64  = 20
		sc                = newSchedule(initHeight)
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight))
	for i := initHeight; i <= peerHeight; i++ {
		assert.Equal(t, blockStateNew, sc.getStateAtHeight(i),
			"Expected all blocks to be in blockStateNew")
		peerIDs := []p2p.ID{}
		for _, peer := range sc.getPeersAtHeight(i) {
			peerIDs = append(peerIDs, peer.peerID)
		}

		assert.Containsf(t, peerIDs, peerID,
			"Expected %s to have block %d", peerID, i)
	}
}

func TestTransitionPending(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerIDTwo  p2p.ID = "2"
		peerHeight int64  = 20
		sc                = newSchedule(initHeight)
		now               = time.Now()
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")
	assert.Nil(t, sc.addPeer(peerIDTwo),
		"Adding a peer should return no error")

	assert.Error(t, sc.markPending(peerID, peerHeight, now),
		"Expected scheduling a block from a peer in peerStateNew to fail")

	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.NoError(t, sc.setPeerHeight(peerIDTwo, peerHeight),
		"Expected setPeerHeight to return no error")

	assert.NoError(t, sc.markPending(peerID, peerHeight, now),
		"Expected markPending on a new block to succeed")
	assert.Error(t, sc.markPending(peerIDTwo, peerHeight, now),
		"Expected markPending by a second peer to fail")

	assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight),
		"Expected the block to be in blockStatePending")

	assert.NoError(t, sc.removePeer(peerID),
		"Expected removePeer to return no error")

	assert.Equal(t, blockStateNew, sc.getStateAtHeight(peerHeight),
		"Expected the block to be in blockStateNew")

	assert.Error(t, sc.markPending(peerID, peerHeight, now),
		"Expected markPending on a removed peer to fail")

	assert.NoError(t, sc.markPending(peerIDTwo, peerHeight, now),
		"Expected markPending on a ready peer to succeed")

	assert.Equal(t, blockStatePending, sc.getStateAtHeight(peerHeight),
		"Expected the block to be in blockStatePending")
}

func TestTransitionReceived(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerIDTwo  p2p.ID = "2"
		peerHeight int64  = 20
		blockSize  int64  = 1024
		sc                = newSchedule(initHeight)
		now               = time.Now()
		receivedAt        = now.Add(1 * time.Second)
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Expected adding peer %s to succeed", peerID)
	assert.NoError(t, sc.addPeer(peerIDTwo),
		"Expected adding peer %s to succeed", peerIDTwo)
	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")
	assert.NoErrorf(t, sc.setPeerHeight(peerIDTwo, peerHeight),
		"Expected setPeerHeight on %s to %d to succeed", peerIDTwo, peerHeight)
	assert.NoError(t, sc.markPending(peerID, initHeight, now),
		"Expected markPending on a new block to succeed")

	assert.Error(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt),
		"Expected markReceived from a non-requesting peer to fail")

	assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected markReceived on a pending block to succeed")

	assert.Error(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected markReceived on an already received block to fail")

	assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateReceived", initHeight)

	assert.NoErrorf(t, sc.removePeer(peerID),
		"Expected removePeer removing %s to succeed", peerID)

	assert.Equalf(t, blockStateNew, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateNew", initHeight)

	assert.NoErrorf(t, sc.markPending(peerIDTwo, initHeight, now),
		"Expected markPending %d from %s to succeed", initHeight, peerIDTwo)
	assert.NoErrorf(t, sc.markReceived(peerIDTwo, initHeight, blockSize, receivedAt),
		"Expected markReceived %d from %s to succeed", initHeight, peerIDTwo)
	assert.Equalf(t, blockStateReceived, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateReceived", initHeight)
}

func TestTransitionProcessed(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerHeight int64  = 20
		blockSize  int64  = 1024
		sc                = newSchedule(initHeight)
		now               = time.Now()
		receivedAt        = now.Add(1 * time.Second)
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Expected adding peer %s to succeed", peerID)
	assert.NoErrorf(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight on %s to %d to succeed", peerID, peerHeight)
	assert.NoError(t, sc.markPending(peerID, initHeight, now),
		"Expected markPending on a new block to succeed")
	assert.NoError(t, sc.markReceived(peerID, initHeight, blockSize, receivedAt),
		"Expected markReceived on a pending block to succeed")

	assert.Error(t, sc.markProcessed(initHeight+1),
		"Expected marking %d as processed to fail", initHeight+1)
	assert.NoError(t, sc.markProcessed(initHeight),
		"Expected marking %d as processed to succeed", initHeight)

	assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateProcessed", initHeight)

	assert.NoError(t, sc.removePeer(peerID),
		"Expected removing peer %s to succeed", peerID)

	assert.Equalf(t, blockStateProcessed, sc.getStateAtHeight(initHeight),
		"Expected block %d to be blockStateProcessed", initHeight)
}

func TestMinMaxHeight(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerHeight int64  = 20
		sc                = newSchedule(initHeight)
		now               = time.Now()
	)

	assert.Equal(t, initHeight, sc.minHeight(),
		"Expected min height to be the initialized height")

	assert.Equal(t, initHeight, sc.maxHeight(),
		"Expected max height to be the initialized height")

	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")

	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")

	assert.Equal(t, peerHeight, sc.maxHeight(),
		"Expected max height to increase to peerHeight")

	assert.Nil(t, sc.markPending(peerID, initHeight, now.Add(1*time.Second)),
		"Expected marking initHeight as pending to return no error")

	assert.Equal(t, initHeight+1, sc.minHeight(),
		"Expected marking initHeight as pending to move minHeight forward")
}

func TestPeersSlowerThan(t *testing.T) {
	var (
		initHeight int64  = 5
		peerID     p2p.ID = "1"
		peerHeight int64  = 20
		blockSize  int64  = 1024
		sc                = newSchedule(initHeight)
		now               = time.Now()
		receivedAt        = now.Add(1 * time.Second)
	)

	assert.NoError(t, sc.addPeer(peerID),
		"Adding a peer should return no error")

	assert.NoError(t, sc.setPeerHeight(peerID, peerHeight),
		"Expected setPeerHeight to return no error")

	assert.NoError(t, sc.markPending(peerID, peerHeight, now),
		"Expected markPending to return no error")

	assert.NoError(t, sc.markReceived(peerID, peerHeight, blockSize, receivedAt),
		"Expected markReceived to return no error")

	assert.Empty(t, sc.peersSlowerThan(blockSize-1),
		"expected no peers to be slower than blockSize-1 bytes/sec")

	assert.Containsf(t, sc.peersSlowerThan(blockSize+1), peerID,
		"expected %s to be slower than blockSize+1 bytes/sec", peerID)
}
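// A possible consolidation of the tests above (sketch only; helper name is
// hypothetical): the transition tests share an addPeer + setPeerHeight setup,
// which a helper would keep out of the way of the transition under test:
//
//	func setupSchedule(t *testing.T, initHeight, peerHeight int64, peers ...p2p.ID) *schedule {
//		sc := newSchedule(initHeight)
//		for _, id := range peers {
//			assert.NoError(t, sc.addPeer(id))
//			assert.NoError(t, sc.setPeerHeight(id, peerHeight))
//		}
//		return sc
//	}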
@@ -1,64 +0,0 @@
package v2

import (
	"github.com/Workiva/go-datastructures/queue"
)

type Event queue.Item

type priority interface {
	Compare(other queue.Item) int
	Priority() int
}

type priorityLow struct{}
type priorityNormal struct{}
type priorityHigh struct{}

func (p priorityLow) Priority() int {
	return 1
}

func (p priorityNormal) Priority() int {
	return 2
}

func (p priorityHigh) Priority() int {
	return 3
}

func (p priorityLow) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityNormal) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

func (p priorityHigh) Compare(other queue.Item) int {
	op := other.(priority)
	if p.Priority() > op.Priority() {
		return 1
	} else if p.Priority() == op.Priority() {
		return 0
	}
	return -1
}

type noOpEvent struct {
	priorityLow
}

var noOp = noOpEvent{}
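// The three Compare methods above are identical apart from the receiver. A
// sketch of one way to collapse the duplication (helper name hypothetical,
// not part of this changeset), preserving the queue.Item contract:
//
//	func comparePriority(p priority, other queue.Item) int {
//		op := other.(priority)
//		switch {
//		case p.Priority() > op.Priority():
//			return 1
//		case p.Priority() == op.Priority():
//			return 0
//		default:
//			return -1
//		}
//	}
//
//	func (p priorityLow) Compare(other queue.Item) int    { return comparePriority(p, other) }
//	func (p priorityNormal) Compare(other queue.Item) int { return comparePriority(p, other) }
//	func (p priorityHigh) Compare(other queue.Item) int   { return comparePriority(p, other) }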
@@ -1,7 +1,7 @@
package v0
package blockchain

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/go-amino"
	"github.com/tendermint/tendermint/types"
)
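// Both import forms above refer to the same package. The aliased form
// (amino "github.com/tendermint/go-amino") is explicit about the binding
// because the package name, amino, does not match the final path element,
// go-amino; the unaliased form compiles identically but leaves the name
// implicit.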
@@ -1,34 +0,0 @@
package main

import (
	"fmt"
	"strings"

	"github.com/snikch/goodman/hooks"
	"github.com/snikch/goodman/transaction"
)

func main() {
	// This must be compiled beforehand and given to dredd as a parameter;
	// in the meantime the server should already be running.
	h := hooks.NewHooks()
	server := hooks.NewServer(hooks.NewHooksRunner(h))
	h.BeforeAll(func(t []*transaction.Transaction) {
		fmt.Println(t[0].Name)
	})
	h.BeforeEach(func(t *transaction.Transaction) {
		if strings.HasPrefix(t.Name, "Tx") ||
			// We need a proper example of evidence to broadcast
			strings.HasPrefix(t.Name, "Info > /broadcast_evidence") ||
			// We need a proper example of path and data
			strings.HasPrefix(t.Name, "ABCI > /abci_query") ||
			// We need to find a way to make a transaction before starting the tests;
			// that hash should replace the dummy one in the swagger file
			strings.HasPrefix(t.Name, "Info > /tx") {
			t.Skip = true
			fmt.Printf("%s has been skipped\n", t.Name)
		}
	})
	server.Serve()
	defer server.Listener.Close()
	fmt.Print("FINE")
}
@@ -3,7 +3,6 @@ package main
import (
	"flag"
	"os"
	"time"

	"github.com/tendermint/tendermint/crypto/ed25519"
	cmn "github.com/tendermint/tendermint/libs/common"
@@ -14,10 +13,9 @@ import (

func main() {
	var (
		addr             = flag.String("addr", ":26659", "Address of client to connect to")
		chainID          = flag.String("chain-id", "mychain", "chain id")
		privValKeyPath   = flag.String("priv-key", "", "priv val key file path")
		privValStatePath = flag.String("priv-state", "", "priv val state file path")
		addr        = flag.String("addr", ":26659", "Address of client to connect to")
		chainID     = flag.String("chain-id", "mychain", "chain id")
		privValPath = flag.String("priv", "", "priv val file path")

		logger = log.NewTMLogger(
			log.NewSyncWriter(os.Stdout),
@@ -29,41 +27,27 @@ func main() {
		"Starting private validator",
		"addr", *addr,
		"chainID", *chainID,
		"privKeyPath", *privValKeyPath,
		"privStatePath", *privValStatePath,
		"privPath", *privValPath,
	)

	pv := privval.LoadFilePV(*privValKeyPath, *privValStatePath)
	pv := privval.LoadFilePV(*privValPath)

	var dialer privval.SocketDialer
	protocol, address := cmn.ProtocolAndAddress(*addr)
	switch protocol {
	case "unix":
		dialer = privval.DialUnixFn(address)
	case "tcp":
		connTimeout := 3 * time.Second // TODO
		dialer = privval.DialTCPFn(address, connTimeout, ed25519.GenPrivKey())
	default:
		logger.Error("Unknown protocol", "protocol", protocol)
		os.Exit(1)
	}

	sd := privval.NewSignerDialerEndpoint(logger, dialer)
	ss := privval.NewSignerServer(sd, *chainID, pv)

	err := ss.Start()
	rs := privval.NewRemoteSigner(
		logger,
		*chainID,
		*addr,
		pv,
		ed25519.GenPrivKey(),
	)
	err := rs.Start()
	if err != nil {
		panic(err)
	}

	// Stop upon receiving SIGTERM or CTRL-C.
	cmn.TrapSignal(logger, func() {
		err := ss.Stop()
	cmn.TrapSignal(func() {
		err := rs.Stop()
		if err != nil {
			panic(err)
		}
	})

	// Run forever.
	select {}
}
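// For context, the two sides of this diff expect different invocations
// (binary name assumed for illustration):
//
//	# side with split key/state files:
//	priv_val_server -addr :26659 -chain-id mychain \
//	    -priv-key priv_validator_key.json -priv-state priv_validator_state.json
//
//	# side with a single priv val file:
//	priv_val_server -addr :26659 -chain-id mychain -priv priv_validator.json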
@@ -17,7 +17,7 @@ var GenValidatorCmd = &cobra.Command{
}

func genValidator(cmd *cobra.Command, args []string) {
	pv := privval.GenFilePV("", "")
	pv := privval.GenFilePV("")
	jsbz, err := cdc.MarshalJSON(pv)
	if err != nil {
		panic(err)
@@ -4,6 +4,7 @@ import (
	"fmt"

	"github.com/spf13/cobra"

	cfg "github.com/tendermint/tendermint/config"
	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/p2p"
@@ -25,18 +26,15 @@ func initFiles(cmd *cobra.Command, args []string) error {

func initFilesWithConfig(config *cfg.Config) error {
	// private validator
	privValKeyFile := config.PrivValidatorKeyFile()
	privValStateFile := config.PrivValidatorStateFile()
	privValFile := config.PrivValidatorFile()
	var pv *privval.FilePV
	if cmn.FileExists(privValKeyFile) {
		pv = privval.LoadFilePV(privValKeyFile, privValStateFile)
		logger.Info("Found private validator", "keyFile", privValKeyFile,
			"stateFile", privValStateFile)
	if cmn.FileExists(privValFile) {
		pv = privval.LoadFilePV(privValFile)
		logger.Info("Found private validator", "path", privValFile)
	} else {
		pv = privval.GenFilePV(privValKeyFile, privValStateFile)
		pv = privval.GenFilePV(privValFile)
		pv.Save()
		logger.Info("Generated private validator", "keyFile", privValKeyFile,
			"stateFile", privValStateFile)
		logger.Info("Generated private validator", "path", privValFile)
	}

	nodeKeyFile := config.NodeKeyFile()
@@ -59,10 +57,9 @@ func initFilesWithConfig(config *cfg.Config) error {
		GenesisTime:     tmtime.Now(),
		ConsensusParams: types.DefaultConsensusParams(),
	}
	key := pv.GetPubKey()
	genDoc.Validators = []types.GenesisValidator{{
		Address: key.Address(),
		PubKey:  key,
		Address: pv.GetPubKey().Address(),
		PubKey:  pv.GetPubKey(),
		Power:   10,
	}}
@@ -4,7 +4,6 @@ import (
	"fmt"
	"net/url"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"

	cmn "github.com/tendermint/tendermint/libs/common"
@@ -44,7 +43,7 @@ func init() {
	LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size")
}

func EnsureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
	u, err := url.Parse(addr)
	if err != nil {
		return "", err
@@ -60,16 +59,11 @@ func EnsureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
}

func runProxy(cmd *cobra.Command, args []string) error {
	// Stop upon receiving SIGTERM or CTRL-C.
	cmn.TrapSignal(logger, func() {
		// TODO: close up shop
	})

	nodeAddr, err := EnsureAddrHasSchemeOrDefaultToTCP(nodeAddr)
	nodeAddr, err := ensureAddrHasSchemeOrDefaultToTCP(nodeAddr)
	if err != nil {
		return err
	}
	listenAddr, err := EnsureAddrHasSchemeOrDefaultToTCP(listenAddr)
	listenAddr, err := ensureAddrHasSchemeOrDefaultToTCP(listenAddr)
	if err != nil {
		return err
	}
@@ -81,7 +75,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
	logger.Info("Constructing Verifier...")
	cert, err := proxy.NewVerifier(chainID, home, node, logger, cacheSize)
	if err != nil {
		return errors.Wrap(err, "constructing Verifier")
		return cmn.ErrorWrap(err, "constructing Verifier")
	}
	cert.SetLogger(logger)
	sc := proxy.SecureClient(node, cert)
@@ -89,9 +83,12 @@ func runProxy(cmd *cobra.Command, args []string) error {
	logger.Info("Starting proxy...")
	err = proxy.StartProxy(sc, listenAddr, logger, maxOpenConnections)
	if err != nil {
		return errors.Wrap(err, "starting proxy")
		return cmn.ErrorWrap(err, "starting proxy")
	}

	// Run forever
	select {}
	cmn.TrapSignal(func() {
		// TODO: close up shop
	})

	return nil
}
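// The error-wrapping change in the hunks above pairs with the import change
// at the top of this file: the side importing "github.com/pkg/errors" wraps
// with errors.Wrap, while the other side uses tendermint's own cmn.ErrorWrap
// helper from libs/common.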
@@ -5,7 +5,6 @@ import (

	"github.com/spf13/cobra"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/privval"
)
@@ -18,12 +17,6 @@ var ResetAllCmd = &cobra.Command{
	Run: resetAll,
}

var keepAddrBook bool

func init() {
	ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "Keep the address book intact")
}

// ResetPrivValidatorCmd resets the private validator files.
var ResetPrivValidatorCmd = &cobra.Command{
	Use: "unsafe_reset_priv_validator",
@@ -34,45 +27,36 @@ var ResetPrivValidatorCmd = &cobra.Command{
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAll(cmd *cobra.Command, args []string) {
	ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
		config.PrivValidatorStateFile(), logger)
	ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorFile(), logger)
}

// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) {
	resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
	resetFilePV(config.PrivValidatorFile(), logger)
}

// ResetAll removes address book files plus all data, and resets the privValidator data.
// ResetAll removes the privValidator and address book files plus all data.
// Exported so other CLI tools can use it.
func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) {
	if keepAddrBook {
		logger.Info("The address book remains intact")
	} else {
		removeAddrBook(addrBookFile, logger)
	}
func ResetAll(dbDir, addrBookFile, privValFile string, logger log.Logger) {
	resetFilePV(privValFile, logger)
	removeAddrBook(addrBookFile, logger)
	if err := os.RemoveAll(dbDir); err == nil {
		logger.Info("Removed all blockchain history", "dir", dbDir)
	} else {
		logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
	}
	// recreate the dbDir since the privVal state needs to live there
	cmn.EnsureDir(dbDir, 0700)
	resetFilePV(privValKeyFile, privValStateFile, logger)
}

func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) {
	if _, err := os.Stat(privValKeyFile); err == nil {
		pv := privval.LoadFilePVEmptyState(privValKeyFile, privValStateFile)
func resetFilePV(privValFile string, logger log.Logger) {
	if _, err := os.Stat(privValFile); err == nil {
		pv := privval.LoadFilePV(privValFile)
		pv.Reset()
		logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
			"stateFile", privValStateFile)
		logger.Info("Reset private validator file to genesis state", "file", privValFile)
	} else {
		pv := privval.GenFilePV(privValKeyFile, privValStateFile)
		pv := privval.GenFilePV(privValFile)
		pv.Save()
		logger.Info("Generated private validator file", "keyFile", privValKeyFile,
			"stateFile", privValStateFile)
		logger.Info("Generated private validator file", "file", privValFile)
	}
}
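// Worth noting about the newer ResetAll above: it resets the privval only
// after os.RemoveAll(dbDir) and an explicit cmn.EnsureDir(dbDir, 0700). That
// ordering matters because the split-out priv_validator_state.json defaults
// to the data directory (see the config changes later in this diff);
// resetting before the wipe would delete the fresh state file.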
@@ -54,9 +54,6 @@ var RootCmd = &cobra.Command{
		if err != nil {
			return err
		}
		if config.LogFormat == cfg.LogFormatJSON {
			logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
		}
		logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel())
		if err != nil {
			return err
@@ -22,6 +22,10 @@ var (
	defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)

const (
	rootName = "root"
)

// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
	if err := os.Unsetenv("TMHOME"); err != nil {
@@ -165,7 +169,7 @@ func TestRootConfig(t *testing.T) {
func WriteConfigVals(dir string, vals map[string]string) error {
	data := ""
	for k, v := range vals {
		data += fmt.Sprintf("%s = \"%s\"\n", k, v)
		data = data + fmt.Sprintf("%s = \"%s\"\n", k, v)
	}
	cfile := filepath.Join(dir, "config.toml")
	return ioutil.WriteFile(cfile, []byte(data), 0666)
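// Both sides of the WriteConfigVals hunk above are equivalent (`data += s`
// versus `data = data + s`). For many keys, a strings.Builder avoids the
// quadratic copying of repeated string concatenation; a sketch, not part of
// this change (and %q additionally escapes special characters):
//
//	var b strings.Builder
//	for k, v := range vals {
//		fmt.Fprintf(&b, "%s = %q\n", k, v)
//	}
//	cfile := filepath.Join(dir, "config.toml")
//	return ioutil.WriteFile(cfile, []byte(b.String()), 0666)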
@@ -2,10 +2,12 @@ package commands

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"

	"github.com/spf13/cobra"

	cmn "github.com/tendermint/tendermint/libs/common"
	nm "github.com/tendermint/tendermint/node"
)

@@ -19,10 +21,10 @@ func AddNodeFlags(cmd *cobra.Command) {
	cmd.Flags().String("priv_validator_laddr", config.PrivValidatorListenAddr, "Socket address to listen on for connections from external priv_validator process")

	// node flags
	cmd.Flags().Bool("fast_sync", config.FastSyncMode, "Fast blockchain syncing")
	cmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")

	// abci flags
	cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or one of: 'kvstore', 'persistent_kvstore', 'counter', 'counter_serial' or 'noop' for local testing.")
	cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'kvstore' for local testing.")
	cmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)")

	// rpc flags
@@ -55,20 +57,28 @@ func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
				return fmt.Errorf("Failed to create node: %v", err)
			}

			// Stop upon receiving SIGTERM or CTRL-C.
			cmn.TrapSignal(logger, func() {
				if n.IsRunning() {
					n.Stop()
			// Stop upon receiving SIGTERM or CTRL-C
			c := make(chan os.Signal, 1)
			signal.Notify(c, os.Interrupt, syscall.SIGTERM)
			go func() {
				for sig := range c {
					logger.Error(fmt.Sprintf("captured %v, exiting...", sig))
					if n.IsRunning() {
						n.Stop()
					}
					os.Exit(1)
				}
			})
			}()

			if err := n.Start(); err != nil {
				return fmt.Errorf("Failed to start node: %v", err)
			}
			logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())

			// Run forever.
			// Run forever
			select {}

			return nil
		},
	}
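// Of the two shutdown styles above, one delegates to cmn.TrapSignal while the
// other installs its own signal goroutine. Note that in the latter the
// `return nil` after `select {}` is unreachable: an empty select blocks
// forever, so that variant only ever exits through os.Exit(1) inside the
// signal handler.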
@@ -16,11 +16,12 @@ var ShowNodeIDCmd = &cobra.Command{
}

func showNodeID(cmd *cobra.Command, args []string) error {

	nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
	if err != nil {
		return err
	}

	fmt.Println(nodeKey.ID())

	return nil
}
@@ -3,10 +3,8 @@ package commands
import (
	"fmt"

	"github.com/pkg/errors"
	"github.com/spf13/cobra"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/privval"
)

@@ -14,21 +12,11 @@ import (
var ShowValidatorCmd = &cobra.Command{
	Use:   "show_validator",
	Short: "Show this node's validator info",
	RunE:  showValidator,
	Run:   showValidator,
}

func showValidator(cmd *cobra.Command, args []string) error {
	keyFilePath := config.PrivValidatorKeyFile()
	if !cmn.FileExists(keyFilePath) {
		return fmt.Errorf("private validator file %s does not exist", keyFilePath)
	}

	pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
	bz, err := cdc.MarshalJSON(pv.GetPubKey())
	if err != nil {
		return errors.Wrap(err, "failed to marshal private validator pubkey")
	}

	fmt.Println(string(bz))
	return nil
func showValidator(cmd *cobra.Command, args []string) {
	privValidator := privval.LoadOrGenFilePV(config.PrivValidatorFile())
	pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())
	fmt.Println(string(pubKeyJSONBytes))
}
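// The Run -> RunE switch above is what lets showValidator report failures
// (a missing key file, a marshalling error) as command errors; the Run
// variant silently discards them, as in
// `pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey())`.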
@@ -8,7 +8,6 @@ import (
	"strings"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"

	cfg "github.com/tendermint/tendermint/config"
	cmn "github.com/tendermint/tendermint/libs/common"
@@ -21,17 +20,13 @@ import (
var (
	nValidators    int
	nNonValidators int
	configFile     string
	outputDir      string
	nodeDirPrefix  string

	populatePersistentPeers bool
	hostnamePrefix          string
	hostnameSuffix          string
	startingIPAddress       string
	hostnames               []string
	p2pPort                 int
	randomMonikers          bool
)

const (
@@ -41,8 +36,6 @@ const (
func init() {
	TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4,
		"Number of validators to initialize the testnet with")
	TestnetFilesCmd.Flags().StringVar(&configFile, "config", "",
		"Config file to use (note some options may be overwritten)")
	TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0,
		"Number of non-validators to initialize the testnet with")
	TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet",
@@ -53,17 +46,11 @@ func init() {
	TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true,
		"Update config of each node with the list of persistent peers built using either hostname-prefix or starting-ip-address")
	TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node",
		"Hostname prefix (\"node\" results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
	TestnetFilesCmd.Flags().StringVar(&hostnameSuffix, "hostname-suffix", "",
		"Hostname suffix (\".xyz.com\" results in persistent peers list ID0@node0.xyz.com:26656, ID1@node1.xyz.com:26656, ...)")
		"Hostname prefix (node results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)")
	TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "",
		"Starting IP address (\"192.168.0.1\" results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
	TestnetFilesCmd.Flags().StringArrayVar(&hostnames, "hostname", []string{},
		"Manually override all hostnames of validators and non-validators (use --hostname multiple times for multiple hosts)")
		"Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)")
	TestnetFilesCmd.Flags().IntVar(&p2pPort, "p2p-port", 26656,
		"P2P Port")
	TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false,
		"Randomize the moniker for each generated node")
}

// TestnetFilesCmd allows initialisation of files for a Tendermint testnet.
@@ -85,29 +72,7 @@ Example:
}

func testnetFiles(cmd *cobra.Command, args []string) error {
	if len(hostnames) > 0 && len(hostnames) != (nValidators+nNonValidators) {
		return fmt.Errorf(
			"testnet needs precisely %d hostnames (number of validators plus non-validators) if --hostname parameter is used",
			nValidators+nNonValidators,
		)
	}

	config := cfg.DefaultConfig()

	// overwrite default config if set and valid
	if configFile != "" {
		viper.SetConfigFile(configFile)
		if err := viper.ReadInConfig(); err != nil {
			return err
		}
		if err := viper.Unmarshal(config); err != nil {
			return err
		}
		if err := config.ValidateBasic(); err != nil {
			return err
		}
	}

	genVals := make([]types.GenesisValidator, nValidators)

	for i := 0; i < nValidators; i++ {
@@ -120,18 +85,11 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
			_ = os.RemoveAll(outputDir)
			return err
		}
		err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm)
		if err != nil {
			_ = os.RemoveAll(outputDir)
			return err
		}

		initFilesWithConfig(config)

		pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey)
		pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState)

		pv := privval.LoadFilePV(pvKeyFile, pvStateFile)
		pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator)
		pv := privval.LoadFilePV(pvFile)
		genVals[i] = types.GenesisValidator{
			Address: pv.GetPubKey().Address(),
			PubKey:  pv.GetPubKey(),
@@ -150,21 +108,14 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
			return err
		}

		err = os.MkdirAll(filepath.Join(nodeDir, "data"), nodeDirPerm)
		if err != nil {
			_ = os.RemoveAll(outputDir)
			return err
		}

		initFilesWithConfig(config)
	}

	// Generate genesis doc from generated validators
	genDoc := &types.GenesisDoc{
		ChainID:         "chain-" + cmn.RandStr(6),
		ConsensusParams: types.DefaultConsensusParams(),
		GenesisTime:     tmtime.Now(),
		Validators:      genVals,
		GenesisTime: tmtime.Now(),
		ChainID:     "chain-" + cmn.RandStr(6),
		Validators:  genVals,
	}

	// Write genesis file.
@@ -176,84 +127,58 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
		}
	}

	// Gather persistent peer addresses.
	var (
		persistentPeers string
		err             error
	)
	if populatePersistentPeers {
		persistentPeers, err = persistentPeersString(config)
		err := populatePersistentPeersInConfigAndWriteIt(config)
		if err != nil {
			_ = os.RemoveAll(outputDir)
			return err
		}
	}

	// Overwrite default config.
	for i := 0; i < nValidators+nNonValidators; i++ {
		nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
		config.SetRoot(nodeDir)
		config.P2P.AddrBookStrict = false
		config.P2P.AllowDuplicateIP = true
		if populatePersistentPeers {
			config.P2P.PersistentPeers = persistentPeers
		}
		config.Moniker = moniker(i)

		cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
	}

	fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators)
	return nil
}

func hostnameOrIP(i int) string {
	if len(hostnames) > 0 && i < len(hostnames) {
		return hostnames[i]
	}
	if startingIPAddress == "" {
		return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix)
	}
	ip := net.ParseIP(startingIPAddress)
	ip = ip.To4()
	if ip == nil {
		fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
		os.Exit(1)
	if startingIPAddress != "" {
		ip := net.ParseIP(startingIPAddress)
		ip = ip.To4()
		if ip == nil {
			fmt.Printf("%v: non ipv4 address\n", startingIPAddress)
			os.Exit(1)
		}

		for j := 0; j < i; j++ {
			ip[3]++
		}
		return ip.String()
	}

	for j := 0; j < i; j++ {
		ip[3]++
	}
	return ip.String()
	return fmt.Sprintf("%s%d", hostnamePrefix, i)
}

func persistentPeersString(config *cfg.Config) (string, error) {
func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error {
	persistentPeers := make([]string, nValidators+nNonValidators)
	for i := 0; i < nValidators+nNonValidators; i++ {
		nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
		config.SetRoot(nodeDir)
		nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
		if err != nil {
			return "", err
			return err
		}
		persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
	}
	return strings.Join(persistentPeers, ","), nil
}
	persistentPeersList := strings.Join(persistentPeers, ",")

func moniker(i int) string {
	if randomMonikers {
		return randomMoniker()
	}
	if len(hostnames) > 0 && i < len(hostnames) {
		return hostnames[i]
	}
	if startingIPAddress == "" {
		return fmt.Sprintf("%s%d%s", hostnamePrefix, i, hostnameSuffix)
	}
	return randomMoniker()
}
	for i := 0; i < nValidators+nNonValidators; i++ {
		nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
		config.SetRoot(nodeDir)
		config.P2P.PersistentPeers = persistentPeersList
		config.P2P.AddrBookStrict = false

func randomMoniker() string {
	return cmn.HexBytes(cmn.RandBytes(8)).String()
		// overwrite default config
		cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config)
	}

	return nil
}
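// To illustrate hostnameOrIP above (example values assumed): with
// startingIPAddress = "192.168.0.1" it returns "192.168.0.1" for node 0 and
// "192.168.0.3" for node 2, incrementing only the last IPv4 octet; with
// hostnamePrefix = "node" and hostnameSuffix = ".xyz.com" it composes
// "node0.xyz.com", "node2.xyz.com", and so on. Since the ip[3]++ loop never
// carries into higher octets, indexes past the last octet's range wrap
// around within it.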
@@ -1,7 +1,7 @@
package commands

import (
	amino "github.com/tendermint/go-amino"
	"github.com/tendermint/go-amino"
	cryptoAmino "github.com/tendermint/tendermint/crypto/encoding/amino"
)
310 config/config.go
@@ -2,7 +2,6 @@ package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
@@ -15,11 +14,6 @@ const (
|
||||
FuzzModeDrop = iota
|
||||
// FuzzModeDelay is a mode in which we randomly sleep
|
||||
FuzzModeDelay
|
||||
|
||||
// LogFormatPlain is a format for colored text
|
||||
LogFormatPlain = "plain"
|
||||
// LogFormatJSON is a format for json output
|
||||
LogFormatJSON = "json"
|
||||
)
|
||||
|
||||
// NOTE: Most of the structs & relevant comments + the
|
||||
@@ -36,24 +30,15 @@ var (
|
||||
defaultConfigFileName = "config.toml"
|
||||
defaultGenesisJSONName = "genesis.json"
|
||||
|
||||
defaultPrivValKeyName = "priv_validator_key.json"
|
||||
defaultPrivValStateName = "priv_validator_state.json"
|
||||
|
||||
defaultPrivValName = "priv_validator.json"
|
||||
defaultNodeKeyName = "node_key.json"
|
||||
defaultAddrBookName = "addrbook.json"
|
||||
|
||||
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
|
||||
defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName)
|
||||
defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName)
|
||||
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
|
||||
)
|
||||
|
||||
var (
|
||||
oldPrivVal = "priv_validator.json"
|
||||
oldPrivValPath = filepath.Join(defaultConfigDir, oldPrivVal)
|
||||
defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName)
|
||||
defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName)
|
||||
defaultPrivValPath = filepath.Join(defaultConfigDir, defaultPrivValName)
|
||||
defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName)
|
||||
defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName)
|
||||
)
|
||||
|
||||
// Config defines the top level configuration for a Tendermint node
|
||||
@@ -65,7 +50,6 @@ type Config struct {
|
||||
RPC *RPCConfig `mapstructure:"rpc"`
|
||||
P2P *P2PConfig `mapstructure:"p2p"`
|
||||
Mempool *MempoolConfig `mapstructure:"mempool"`
|
||||
FastSync *FastSyncConfig `mapstructure:"fastsync"`
|
||||
Consensus *ConsensusConfig `mapstructure:"consensus"`
|
||||
TxIndex *TxIndexConfig `mapstructure:"tx_index"`
|
||||
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
|
||||
@@ -78,7 +62,6 @@ func DefaultConfig() *Config {
|
||||
RPC: DefaultRPCConfig(),
|
||||
P2P: DefaultP2PConfig(),
|
||||
Mempool: DefaultMempoolConfig(),
|
||||
FastSync: DefaultFastSyncConfig(),
|
||||
Consensus: DefaultConsensusConfig(),
|
||||
TxIndex: DefaultTxIndexConfig(),
|
||||
Instrumentation: DefaultInstrumentationConfig(),
|
||||
@@ -92,7 +75,6 @@ func TestConfig() *Config {
|
||||
RPC: TestRPCConfig(),
|
||||
P2P: TestP2PConfig(),
|
||||
Mempool: TestMempoolConfig(),
|
||||
FastSync: TestFastSyncConfig(),
|
||||
Consensus: TestConsensusConfig(),
|
||||
TxIndex: TestTxIndexConfig(),
|
||||
Instrumentation: TestInstrumentationConfig(),
|
||||
@@ -112,9 +94,6 @@ func (cfg *Config) SetRoot(root string) *Config {
|
||||
// ValidateBasic performs basic validation (checking param bounds, etc.) and
|
||||
// returns an error if any check fails.
|
||||
func (cfg *Config) ValidateBasic() error {
|
||||
if err := cfg.BaseConfig.ValidateBasic(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := cfg.RPC.ValidateBasic(); err != nil {
|
||||
return errors.Wrap(err, "Error in [rpc] section")
|
||||
}
|
||||
@@ -124,9 +103,6 @@ func (cfg *Config) ValidateBasic() error {
|
||||
if err := cfg.Mempool.ValidateBasic(); err != nil {
|
||||
return errors.Wrap(err, "Error in [mempool] section")
|
||||
}
|
||||
if err := cfg.FastSync.ValidateBasic(); err != nil {
|
||||
return errors.Wrap(err, "Error in [fastsync] section")
|
||||
}
|
||||
if err := cfg.Consensus.ValidateBasic(); err != nil {
|
||||
return errors.Wrap(err, "Error in [consensus] section")
|
||||
}
|
||||
@@ -158,20 +134,9 @@ type BaseConfig struct {
|
||||
// If this node is many blocks behind the tip of the chain, FastSync
|
||||
// allows them to catchup quickly by downloading blocks in parallel
|
||||
// and verifying their commits
|
||||
FastSyncMode bool `mapstructure:"fast_sync"`
|
||||
FastSync bool `mapstructure:"fast_sync"`
|
||||
|
||||
// Database backend: goleveldb | cleveldb | boltdb
|
||||
// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
|
||||
// - pure go
|
||||
// - stable
|
||||
// * cleveldb (uses levigo wrapper)
|
||||
// - fast
|
||||
// - requires gcc
|
||||
// - use cleveldb build tag (go build -tags cleveldb)
|
||||
// * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
|
||||
// - EXPERIMENTAL
|
||||
// - may be faster is some use-cases (random reads - indexer)
|
||||
// - use boltdb build tag (go build -tags boltdb)
|
||||
// Database backend: leveldb | memdb | cleveldb
|
||||
DBBackend string `mapstructure:"db_backend"`
|
||||
|
||||
// Database directory
|
||||
@@ -180,17 +145,11 @@ type BaseConfig struct {
|
||||
// Output level for logging
|
||||
LogLevel string `mapstructure:"log_level"`
|
||||
|
||||
// Output format: 'plain' (colored text) or 'json'
|
||||
LogFormat string `mapstructure:"log_format"`
|
||||
|
||||
// Path to the JSON file containing the initial validator set and other meta data
|
||||
Genesis string `mapstructure:"genesis_file"`
|
||||
|
||||
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
|
||||
PrivValidatorKey string `mapstructure:"priv_validator_key_file"`
|
||||
|
||||
// Path to the JSON file containing the last sign state of a validator
|
||||
PrivValidatorState string `mapstructure:"priv_validator_state_file"`
|
||||
PrivValidator string `mapstructure:"priv_validator_file"`
|
||||
|
||||
// TCP or UNIX socket address for Tendermint to listen on for
|
||||
// connections from an external PrivValidator process
|
||||
@@ -213,20 +172,18 @@ type BaseConfig struct {
|
||||
// DefaultBaseConfig returns a default base configuration for a Tendermint node
|
||||
func DefaultBaseConfig() BaseConfig {
|
||||
return BaseConfig{
|
||||
Genesis: defaultGenesisJSONPath,
|
||||
PrivValidatorKey: defaultPrivValKeyPath,
|
||||
PrivValidatorState: defaultPrivValStatePath,
|
||||
NodeKey: defaultNodeKeyPath,
|
||||
Moniker: defaultMoniker,
|
||||
ProxyApp: "tcp://127.0.0.1:26658",
|
||||
ABCI: "socket",
|
||||
LogLevel: DefaultPackageLogLevels(),
|
||||
LogFormat: LogFormatPlain,
|
||||
ProfListenAddress: "",
|
||||
FastSyncMode: true,
|
||||
FilterPeers: false,
|
||||
DBBackend: "goleveldb",
|
||||
DBPath: "data",
|
||||
Genesis: defaultGenesisJSONPath,
|
||||
PrivValidator: defaultPrivValPath,
|
||||
NodeKey: defaultNodeKeyPath,
|
||||
Moniker: defaultMoniker,
|
||||
ProxyApp: "tcp://127.0.0.1:26658",
|
||||
ABCI: "socket",
|
||||
LogLevel: DefaultPackageLogLevels(),
|
||||
ProfListenAddress: "",
|
||||
FastSync: true,
|
||||
FilterPeers: false,
|
||||
DBBackend: "leveldb",
|
||||
DBPath: "data",
|
||||
}
|
||||
}
|
||||
|
||||
@@ -235,7 +192,7 @@ func TestBaseConfig() BaseConfig {
|
||||
cfg := DefaultBaseConfig()
|
||||
cfg.chainID = "tendermint_test"
|
||||
cfg.ProxyApp = "kvstore"
|
||||
cfg.FastSyncMode = false
|
||||
cfg.FastSync = false
|
||||
cfg.DBBackend = "memdb"
|
||||
return cfg
|
||||
}
|
||||
@@ -249,20 +206,9 @@ func (cfg BaseConfig) GenesisFile() string {
|
||||
return rootify(cfg.Genesis, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file
|
||||
func (cfg BaseConfig) PrivValidatorKeyFile() string {
|
||||
return rootify(cfg.PrivValidatorKey, cfg.RootDir)
|
||||
}
|
||||
|
||||
// PrivValidatorFile returns the full path to the priv_validator_state.json file
|
||||
func (cfg BaseConfig) PrivValidatorStateFile() string {
|
||||
return rootify(cfg.PrivValidatorState, cfg.RootDir)
|
||||
}
|
||||
|
||||
// OldPrivValidatorFile returns the full path of the priv_validator.json from pre v0.28.0.
|
||||
// TODO: eventually remove.
|
||||
func (cfg BaseConfig) OldPrivValidatorFile() string {
|
||||
return rootify(oldPrivValPath, cfg.RootDir)
|
||||
// PrivValidatorFile returns the full path to the priv_validator.json file
|
||||
func (cfg BaseConfig) PrivValidatorFile() string {
|
||||
return rootify(cfg.PrivValidator, cfg.RootDir)
|
||||
}
|
||||
|
||||
// NodeKeyFile returns the full path to the node_key.json file
|
||||
@@ -275,17 +221,6 @@ func (cfg BaseConfig) DBDir() string {
|
||||
return rootify(cfg.DBPath, cfg.RootDir)
|
||||
}
|
||||
|
||||
// ValidateBasic performs basic validation (checking param bounds, etc.) and
|
||||
// returns an error if any check fails.
|
||||
func (cfg BaseConfig) ValidateBasic() error {
|
||||
switch cfg.LogFormat {
|
||||
case LogFormatPlain, LogFormatJSON:
|
||||
default:
|
||||
return errors.New("unknown log_format (must be 'plain' or 'json')")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DefaultLogLevel returns a default log level of "error"
|
||||
func DefaultLogLevel() string {
|
||||
return "error"
|
||||
@@ -307,25 +242,13 @@ type RPCConfig struct {
|
||||
// TCP or UNIX socket address for the RPC server to listen on
|
||||
ListenAddress string `mapstructure:"laddr"`
|
||||
|
||||
// A list of origins a cross-domain request can be executed from.
|
||||
// If the special '*' value is present in the list, all origins will be allowed.
|
||||
// An origin may contain a wildcard (*) to replace 0 or more characters (i.e.: http://*.domain.com).
|
||||
// Only one wildcard can be used per origin.
|
||||
CORSAllowedOrigins []string `mapstructure:"cors_allowed_origins"`
|
||||
|
||||
// A list of methods the client is allowed to use with cross-domain requests.
|
||||
CORSAllowedMethods []string `mapstructure:"cors_allowed_methods"`
|
||||
|
||||
// A list of non simple headers the client is allowed to use with cross-domain requests.
|
||||
CORSAllowedHeaders []string `mapstructure:"cors_allowed_headers"`
|
||||
|
||||
// TCP or UNIX socket address for the gRPC server to listen on
|
||||
// NOTE: This server only supports /broadcast_tx_commit
|
||||
GRPCListenAddress string `mapstructure:"grpc_laddr"`
|
||||
|
||||
// Maximum number of simultaneous connections.
|
||||
// Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
|
||||
// If you want to accept a larger number than the default, make sure
|
||||
// If you want to accept more significant number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
GRPCMaxOpenConnections int `mapstructure:"grpc_max_open_connections"`
|
||||
@@ -335,74 +258,24 @@ type RPCConfig struct {
|
||||
|
||||
// Maximum number of simultaneous connections (including WebSocket).
|
||||
// Does not include gRPC connections. See grpc_max_open_connections
|
||||
// If you want to accept a larger number than the default, make sure
|
||||
// If you want to accept more significant number than the default, make sure
|
||||
// you increase your OS limits.
|
||||
// 0 - unlimited.
|
||||
// Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
|
||||
// 1024 - 40 - 10 - 50 = 924 = ~900
|
||||
MaxOpenConnections int `mapstructure:"max_open_connections"`
|
||||
|
||||
// Maximum number of unique clientIDs that can /subscribe
|
||||
// If you're using /broadcast_tx_commit, set to the estimated maximum number
|
||||
// of broadcast_tx_commit calls per block.
|
||||
MaxSubscriptionClients int `mapstructure:"max_subscription_clients"`
|
||||
|
||||
// Maximum number of unique queries a given client can /subscribe to
|
||||
// If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set
|
||||
// to the estimated maximum number of broadcast_tx_commit calls per block.
|
||||
MaxSubscriptionsPerClient int `mapstructure:"max_subscriptions_per_client"`
|
||||
|
||||
// How long to wait for a tx to be committed during /broadcast_tx_commit
|
||||
// WARNING: Using a value larger than 10s will result in increasing the
|
||||
// global HTTP write timeout, which applies to all connections and endpoints.
|
||||
// See https://github.com/tendermint/tendermint/issues/3435
|
||||
TimeoutBroadcastTxCommit time.Duration `mapstructure:"timeout_broadcast_tx_commit"`
|
||||
|
||||
// Maximum size of request body, in bytes
|
||||
MaxBodyBytes int64 `mapstructure:"max_body_bytes"`
|
||||
|
||||
// Maximum size of request header, in bytes
|
||||
MaxHeaderBytes int `mapstructure:"max_header_bytes"`
|
||||
|
||||
// The path to a file containing certificate that is used to create the HTTPS server.
|
||||
// Migth be either absolute path or path related to tendermint's config directory.
|
||||
//
|
||||
// If the certificate is signed by a certificate authority,
|
||||
// the certFile should be the concatenation of the server's certificate, any intermediates,
|
||||
// and the CA's certificate.
|
||||
//
|
||||
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
|
||||
TLSCertFile string `mapstructure:"tls_cert_file"`
|
||||
|
||||
// The path to a file containing matching private key that is used to create the HTTPS server.
|
||||
// Migth be either absolute path or path related to tendermint's config directory.
|
||||
//
|
||||
// NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create HTTPS server. Otherwise, HTTP server is run.
|
||||
TLSKeyFile string `mapstructure:"tls_key_file"`
|
||||
}
|
||||
|
||||
// DefaultRPCConfig returns a default configuration for the RPC server
|
||||
func DefaultRPCConfig() *RPCConfig {
|
||||
return &RPCConfig{
|
||||
ListenAddress: "tcp://127.0.0.1:26657",
|
||||
CORSAllowedOrigins: []string{},
|
||||
CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost},
|
||||
CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"},
|
||||
ListenAddress: "tcp://0.0.0.0:26657",
|
||||
|
||||
GRPCListenAddress: "",
|
||||
GRPCMaxOpenConnections: 900,
|
||||
|
||||
Unsafe: false,
|
||||
MaxOpenConnections: 900,
|
||||
|
||||
MaxSubscriptionClients: 100,
|
||||
MaxSubscriptionsPerClient: 5,
|
||||
TimeoutBroadcastTxCommit: 10 * time.Second,
|
||||
|
||||
MaxBodyBytes: int64(1000000), // 1MB
|
||||
MaxHeaderBytes: 1 << 20, // same as the net/http default
|
||||
|
||||
TLSCertFile: "",
|
||||
TLSKeyFile: "",
|
||||
}
|
||||
}
|
||||
|
@@ -424,49 +297,9 @@ func (cfg *RPCConfig) ValidateBasic() error {
    if cfg.MaxOpenConnections < 0 {
        return errors.New("max_open_connections can't be negative")
    }
    if cfg.MaxSubscriptionClients < 0 {
        return errors.New("max_subscription_clients can't be negative")
    }
    if cfg.MaxSubscriptionsPerClient < 0 {
        return errors.New("max_subscriptions_per_client can't be negative")
    }
    if cfg.TimeoutBroadcastTxCommit < 0 {
        return errors.New("timeout_broadcast_tx_commit can't be negative")
    }
    if cfg.MaxBodyBytes < 0 {
        return errors.New("max_body_bytes can't be negative")
    }
    if cfg.MaxHeaderBytes < 0 {
        return errors.New("max_header_bytes can't be negative")
    }
    return nil
}

// IsCorsEnabled returns true if cross-origin resource sharing is enabled.
func (cfg *RPCConfig) IsCorsEnabled() bool {
    return len(cfg.CORSAllowedOrigins) != 0
}

func (cfg RPCConfig) KeyFile() string {
    path := cfg.TLSKeyFile
    if filepath.IsAbs(path) {
        return path
    }
    return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
}

func (cfg RPCConfig) CertFile() string {
    path := cfg.TLSCertFile
    if filepath.IsAbs(path) {
        return path
    }
    return rootify(filepath.Join(defaultConfigDir, path), cfg.RootDir)
}

func (cfg RPCConfig) IsTLSEnabled() bool {
    return cfg.TLSCertFile != "" && cfg.TLSKeyFile != ""
}

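The two accessors above implement one rule: absolute paths pass through, relative paths resolve under the node's config directory. A hedged usage sketch (the root directory below is hypothetical; compare the TestTLSConfiguration test later in this diff):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/config"
)

func main() {
    cfg := config.DefaultRPCConfig()
    cfg.RootDir = "/home/user/.tendermint" // hypothetical node home
    cfg.TLSCertFile = "server.crt"         // relative: lands under <root>/config/
    cfg.TLSKeyFile = "/etc/ssl/server.key" // absolute: returned unchanged
    fmt.Println(cfg.CertFile())     // /home/user/.tendermint/config/server.crt
    fmt.Println(cfg.KeyFile())      // /etc/ssl/server.key
    fmt.Println(cfg.IsTLSEnabled()) // true: both files are set
}
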
//-----------------------------------------------------------------------------
// P2PConfig

@@ -559,7 +392,7 @@ func DefaultP2PConfig() *P2PConfig {
        RecvRate: 5120000, // 5 mB/s
        PexReactor: true,
        SeedMode: false,
        AllowDuplicateIP: false,
        AllowDuplicateIP: true, // so non-breaking yet
        HandshakeTimeout: 20 * time.Second,
        DialTimeout: 3 * time.Second,
        TestDialFail: false,
@@ -631,14 +464,12 @@ func DefaultFuzzConnConfig() *FuzzConnConfig {

// MempoolConfig defines the configuration options for the Tendermint mempool
type MempoolConfig struct {
    RootDir string `mapstructure:"home"`
    Recheck bool `mapstructure:"recheck"`
    Broadcast bool `mapstructure:"broadcast"`
    WalPath string `mapstructure:"wal_dir"`
    Size int `mapstructure:"size"`
    MaxTxsBytes int64 `mapstructure:"max_txs_bytes"`
    CacheSize int `mapstructure:"cache_size"`
    MaxTxBytes int `mapstructure:"max_tx_bytes"`
    RootDir string `mapstructure:"home"`
    Recheck bool `mapstructure:"recheck"`
    Broadcast bool `mapstructure:"broadcast"`
    WalPath string `mapstructure:"wal_dir"`
    Size int `mapstructure:"size"`
    CacheSize int `mapstructure:"cache_size"`
}

// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
@@ -647,12 +478,10 @@ func DefaultMempoolConfig() *MempoolConfig {
        Recheck: true,
        Broadcast: true,
        WalPath: "",
        // Each signature verification takes .5ms, Size reduced until we implement
        // Each signature verification takes .5ms, size reduced until we implement
        // ABCI Recheck
        Size: 5000,
        MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
        CacheSize: 10000,
        MaxTxBytes: 1024 * 1024, // 1MB
        Size: 5000,
        CacheSize: 10000,
    }
}

@@ -668,61 +497,18 @@ func (cfg *MempoolConfig) WalDir() string {
    return rootify(cfg.WalPath, cfg.RootDir)
}

// WalEnabled returns true if the WAL is enabled.
func (cfg *MempoolConfig) WalEnabled() bool {
    return cfg.WalPath != ""
}

// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *MempoolConfig) ValidateBasic() error {
    if cfg.Size < 0 {
        return errors.New("size can't be negative")
    }
    if cfg.MaxTxsBytes < 0 {
        return errors.New("max_txs_bytes can't be negative")
    }
    if cfg.CacheSize < 0 {
        return errors.New("cache_size can't be negative")
    }
    if cfg.MaxTxBytes < 0 {
        return errors.New("max_tx_bytes can't be negative")
    }
    return nil
}

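To make the two mempool limits concrete, a small editorial arithmetic sketch using the defaults above and the 1MB-transaction example from the config template further down:

package main

import "fmt"

func main() {
    const (
        sizeLimit   = 5000               // default size: cap on tx count
        maxTxsBytes = 1024 * 1024 * 1024 // default max_txs_bytes: 1GB
        txBytes     = 1024 * 1024        // assume 1MB per transaction
    )
    // Whichever cap is reached first wins: with 1MB transactions the byte
    // cap admits only 1024 txs, well under the 5000-tx count cap.
    fmt.Println("by count:", sizeLimit)           // 5000
    fmt.Println("by bytes:", maxTxsBytes/txBytes) // 1024
}
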
//-----------------------------------------------------------------------------
// FastSyncConfig

// FastSyncConfig defines the configuration for the Tendermint fast sync service
type FastSyncConfig struct {
    Version string `mapstructure:"version"`
}

// DefaultFastSyncConfig returns a default configuration for the fast sync service
func DefaultFastSyncConfig() *FastSyncConfig {
    return &FastSyncConfig{
        Version: "v0",
    }
}

// TestFastSyncConfig returns a default configuration for the fast sync.
func TestFastSyncConfig() *FastSyncConfig {
    return DefaultFastSyncConfig()
}

// ValidateBasic performs basic validation.
func (cfg *FastSyncConfig) ValidateBasic() error {
    switch cfg.Version {
    case "v0":
        return nil
    case "v1":
        return nil
    default:
        return fmt.Errorf("unknown fastsync version %s", cfg.Version)
    }
}

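As an editorial sketch against the v0.32.5 side of this diff (only "v0" and "v1" validate; "v2" is a hypothetical value used here to trigger the error):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/config"
)

func main() {
    cfg := config.DefaultFastSyncConfig()
    cfg.Version = "v2" // hypothetical: not a known version here
    fmt.Println(cfg.ValidateBasic()) // unknown fastsync version v2
}
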
//-----------------------------------------------------------------------------
// ConsensusConfig

@@ -751,6 +537,9 @@ type ConsensusConfig struct {
    // Reactor sleep duration parameters
    PeerGossipSleepDuration time.Duration `mapstructure:"peer_gossip_sleep_duration"`
    PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"`

    // Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
    BlockTimeIota time.Duration `mapstructure:"blocktime_iota"`
}

// DefaultConsensusConfig returns a default configuration for the consensus service
@@ -769,6 +558,7 @@ func DefaultConsensusConfig() *ConsensusConfig {
        CreateEmptyBlocksInterval: 0 * time.Second,
        PeerGossipSleepDuration: 100 * time.Millisecond,
        PeerQueryMaj23SleepDuration: 2000 * time.Millisecond,
        BlockTimeIota: 1000 * time.Millisecond,
    }
}

@@ -785,9 +575,16 @@ func TestConsensusConfig() *ConsensusConfig {
    cfg.SkipTimeoutCommit = true
    cfg.PeerGossipSleepDuration = 5 * time.Millisecond
    cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
    cfg.BlockTimeIota = 10 * time.Millisecond
    return cfg
}

// MinValidVoteTime returns the minimum acceptable block time.
// See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md).
func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time {
    return lastBlockTime.Add(cfg.BlockTimeIota)
}

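A hedged sketch of the BFT-time rule above. Note the assumption: MinValidVoteTime and BlockTimeIota exist only on the checked_in side of this diff, so this compiles against that branch; the date is an arbitrary example:

package main

import (
    "fmt"
    "time"

    "github.com/tendermint/tendermint/config"
)

func main() {
    cfg := config.DefaultConsensusConfig() // BlockTimeIota defaults to 1s above
    last := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC)
    // A vote's block time must be at least BlockTimeIota after the last block:
    fmt.Println(cfg.MinValidVoteTime(last)) // 2019-01-01 00:00:01 +0000 UTC
}
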
// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
func (cfg *ConsensusConfig) WaitForTxs() bool {
    return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
@@ -865,6 +662,9 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
    if cfg.PeerQueryMaj23SleepDuration < 0 {
        return errors.New("peer_query_maj23_sleep_duration can't be negative")
    }
    if cfg.BlockTimeIota < 0 {
        return errors.New("blocktime_iota can't be negative")
    }
    return nil
}

@@ -927,12 +727,12 @@ type InstrumentationConfig struct {
    PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`

    // Maximum number of simultaneous connections.
    // If you want to accept a larger number than the default, make sure
    // If you want to accept more significant number than the default, make sure
    // you increase your OS limits.
    // 0 - unlimited.
    MaxOpenConnections int `mapstructure:"max_open_connections"`

    // Instrumentation namespace.
    // Tendermint instrumentation namespace.
    Namespace string `mapstructure:"namespace"`
}

@@ -1,7 +1,6 @@
package config

import (
    "reflect"
    "testing"
    "time"

@@ -37,132 +36,3 @@ func TestConfigValidateBasic(t *testing.T) {
    cfg.Consensus.TimeoutPropose = -10 * time.Second
    assert.Error(t, cfg.ValidateBasic())
}

func TestTLSConfiguration(t *testing.T) {
    assert := assert.New(t)
    cfg := DefaultConfig()
    cfg.SetRoot("/home/user")

    cfg.RPC.TLSCertFile = "file.crt"
    assert.Equal("/home/user/config/file.crt", cfg.RPC.CertFile())
    cfg.RPC.TLSKeyFile = "file.key"
    assert.Equal("/home/user/config/file.key", cfg.RPC.KeyFile())

    cfg.RPC.TLSCertFile = "/abs/path/to/file.crt"
    assert.Equal("/abs/path/to/file.crt", cfg.RPC.CertFile())
    cfg.RPC.TLSKeyFile = "/abs/path/to/file.key"
    assert.Equal("/abs/path/to/file.key", cfg.RPC.KeyFile())
}

func TestBaseConfigValidateBasic(t *testing.T) {
    cfg := TestBaseConfig()
    assert.NoError(t, cfg.ValidateBasic())

    // tamper with log format
    cfg.LogFormat = "invalid"
    assert.Error(t, cfg.ValidateBasic())
}

func TestRPCConfigValidateBasic(t *testing.T) {
    cfg := TestRPCConfig()
    assert.NoError(t, cfg.ValidateBasic())

    fieldsToTest := []string{
        "GRPCMaxOpenConnections",
        "MaxOpenConnections",
        "MaxSubscriptionClients",
        "MaxSubscriptionsPerClient",
        "TimeoutBroadcastTxCommit",
        "MaxBodyBytes",
        "MaxHeaderBytes",
    }

    for _, fieldName := range fieldsToTest {
        reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
        assert.Error(t, cfg.ValidateBasic())
        reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
    }
}

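The reflection loop above recurs in each ValidateBasic test that follows. As an editorial sketch only, it could be factored into a helper along these lines (hypothetical, not part of this diff; assumes the file's existing reflect, testing, and testify/assert imports):

type validatable interface {
    ValidateBasic() error
}

// assertNegativeFieldsRejected sets each named integer field to -1, expects
// ValidateBasic to fail, then restores 0 before checking the next field.
func assertNegativeFieldsRejected(t *testing.T, cfg validatable, fields ...string) {
    for _, name := range fields {
        f := reflect.ValueOf(cfg).Elem().FieldByName(name)
        f.SetInt(-1) // any negative value must be rejected
        assert.Error(t, cfg.ValidateBasic())
        f.SetInt(0) // restore a valid value
    }
}
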
func TestP2PConfigValidateBasic(t *testing.T) {
    cfg := TestP2PConfig()
    assert.NoError(t, cfg.ValidateBasic())

    fieldsToTest := []string{
        "MaxNumInboundPeers",
        "MaxNumOutboundPeers",
        "FlushThrottleTimeout",
        "MaxPacketMsgPayloadSize",
        "SendRate",
        "RecvRate",
    }

    for _, fieldName := range fieldsToTest {
        reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
        assert.Error(t, cfg.ValidateBasic())
        reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
    }
}

func TestMempoolConfigValidateBasic(t *testing.T) {
    cfg := TestMempoolConfig()
    assert.NoError(t, cfg.ValidateBasic())

    fieldsToTest := []string{
        "Size",
        "MaxTxsBytes",
        "CacheSize",
        "MaxTxBytes",
    }

    for _, fieldName := range fieldsToTest {
        reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
        assert.Error(t, cfg.ValidateBasic())
        reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
    }
}

func TestFastSyncConfigValidateBasic(t *testing.T) {
    cfg := TestFastSyncConfig()
    assert.NoError(t, cfg.ValidateBasic())

    // tamper with version
    cfg.Version = "v1"
    assert.NoError(t, cfg.ValidateBasic())

    cfg.Version = "invalid"
    assert.Error(t, cfg.ValidateBasic())
}

func TestConsensusConfigValidateBasic(t *testing.T) {
    cfg := TestConsensusConfig()
    assert.NoError(t, cfg.ValidateBasic())

    fieldsToTest := []string{
        "TimeoutPropose",
        "TimeoutProposeDelta",
        "TimeoutPrevote",
        "TimeoutPrevoteDelta",
        "TimeoutPrecommit",
        "TimeoutPrecommitDelta",
        "TimeoutCommit",
        "CreateEmptyBlocksInterval",
        "PeerGossipSleepDuration",
        "PeerQueryMaj23SleepDuration",
    }

    for _, fieldName := range fieldsToTest {
        reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1)
        assert.Error(t, cfg.ValidateBasic())
        reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0)
    }
}

func TestInstrumentationConfigValidateBasic(t *testing.T) {
    cfg := TestInstrumentationConfig()
    assert.NoError(t, cfg.ValidateBasic())

    // tamper with maximum open connections
    cfg.MaxOpenConnections = -1
    assert.Error(t, cfg.ValidateBasic())
}

config/toml.go (181 lines changed)

@@ -2,17 +2,13 @@ package config

import (
    "bytes"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"
    "text/template"

    cmn "github.com/tendermint/tendermint/libs/common"
)

// DefaultDirPerm is the default permissions used when creating directories.
const DefaultDirPerm = 0700

var configTemplate *template.Template

func init() {
@@ -27,14 +23,14 @@ func init() {
// EnsureRoot creates the root, config, and data directories if they don't exist,
// and panics if it fails.
func EnsureRoot(rootDir string) {
    if err := cmn.EnsureDir(rootDir, DefaultDirPerm); err != nil {
        panic(err.Error())
    if err := cmn.EnsureDir(rootDir, 0700); err != nil {
        cmn.PanicSanity(err.Error())
    }
    if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
        panic(err.Error())
    if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
        cmn.PanicSanity(err.Error())
    }
    if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
        panic(err.Error())
    if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
        cmn.PanicSanity(err.Error())
    }

    configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
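
A short usage sketch for EnsureRoot (editorial; the directory below is hypothetical):

package main

import "github.com/tendermint/tendermint/config"

func main() {
    // Creates the root plus its config/ and data/ subdirectories (0700
    // permissions) and panics on failure; the function goes on to write a
    // default config.toml under config/ if one is missing.
    config.EnsureRoot("/tmp/example-node")
}
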
@@ -79,20 +75,9 @@ moniker = "{{ .BaseConfig.Moniker }}"
# If this node is many blocks behind the tip of the chain, FastSync
# allows them to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast_sync = {{ .BaseConfig.FastSyncMode }}
fast_sync = {{ .BaseConfig.FastSync }}

# Database backend: goleveldb | cleveldb | boltdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
#   - pure go
#   - stable
# * cleveldb (uses levigo wrapper)
#   - fast
#   - requires gcc
#   - use cleveldb build tag (go build -tags cleveldb)
# * boltdb (uses etcd's fork of bolt - github.com/etcd-io/bbolt)
#   - EXPERIMENTAL
#   - may be faster in some use-cases (random reads - indexer)
#   - use boltdb build tag (go build -tags boltdb)
# Database backend: leveldb | memdb | cleveldb
db_backend = "{{ .BaseConfig.DBBackend }}"

# Database directory
@@ -101,19 +86,13 @@ db_dir = "{{ js .BaseConfig.DBPath }}"
# Output level for logging, including package level options
log_level = "{{ .BaseConfig.LogLevel }}"

# Output format: 'plain' (colored text) or 'json'
log_format = "{{ .BaseConfig.LogFormat }}"

##### additional base config options #####

# Path to the JSON file containing the initial validator set and other meta data
genesis_file = "{{ js .BaseConfig.Genesis }}"

# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv_validator_key_file = "{{ js .BaseConfig.PrivValidatorKey }}"

# Path to the JSON file containing the last sign state of a validator
priv_validator_state_file = "{{ js .BaseConfig.PrivValidatorState }}"
priv_validator_file = "{{ js .BaseConfig.PrivValidator }}"

# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process
@@ -140,24 +119,13 @@ filter_peers = {{ .BaseConfig.FilterPeers }}
# TCP or UNIX socket address for the RPC server to listen on
laddr = "{{ .RPC.ListenAddress }}"

# A list of origins a cross-domain request can be executed from
# Default value '[]' disables cors support
# Use '["*"]' to allow any origin
cors_allowed_origins = [{{ range .RPC.CORSAllowedOrigins }}{{ printf "%q, " . }}{{end}}]

# A list of methods the client is allowed to use with cross-domain requests
cors_allowed_methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }}{{end}}]

# A list of non-simple headers the client is allowed to use with cross-domain requests
cors_allowed_headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}]

# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
grpc_laddr = "{{ .RPC.GRPCListenAddress }}"

# Maximum number of simultaneous connections.
# Does not include RPC (HTTP&WebSocket) connections. See max_open_connections
# If you want to accept a larger number than the default, make sure
# If you want to accept more significant number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
@@ -169,48 +137,13 @@ unsafe = {{ .RPC.Unsafe }}

# Maximum number of simultaneous connections (including WebSocket).
# Does not include gRPC connections. See grpc_max_open_connections
# If you want to accept a larger number than the default, make sure
# If you want to accept more significant number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
max_open_connections = {{ .RPC.MaxOpenConnections }}

# Maximum number of unique clientIDs that can /subscribe
# If you're using /broadcast_tx_commit, set to the estimated maximum number
# of broadcast_tx_commit calls per block.
max_subscription_clients = {{ .RPC.MaxSubscriptionClients }}

# Maximum number of unique queries a given client can /subscribe to
# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to
# the estimated maximum number of broadcast_tx_commit calls per block.
max_subscriptions_per_client = {{ .RPC.MaxSubscriptionsPerClient }}

# How long to wait for a tx to be committed during /broadcast_tx_commit.
# WARNING: Using a value larger than 10s will result in increasing the
# global HTTP write timeout, which applies to all connections and endpoints.
# See https://github.com/tendermint/tendermint/issues/3435
timeout_broadcast_tx_commit = "{{ .RPC.TimeoutBroadcastTxCommit }}"

# Maximum size of request body, in bytes
max_body_bytes = {{ .RPC.MaxBodyBytes }}

# Maximum size of request header, in bytes
max_header_bytes = {{ .RPC.MaxHeaderBytes }}

# The path to a file containing a certificate that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to tendermint's config directory.
# If the certificate is signed by a certificate authority,
# the certFile should be the concatenation of the server's certificate, any intermediates,
# and the CA's certificate.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create an HTTPS server. Otherwise, an HTTP server is run.
tls_cert_file = "{{ .RPC.TLSCertFile }}"

# The path to a file containing the matching private key that is used to create the HTTPS server.
# Might be either an absolute path or a path relative to tendermint's config directory.
# NOTE: both tls_cert_file and tls_key_file must be present for Tendermint to create an HTTPS server. Otherwise, an HTTP server is run.
tls_key_file = "{{ .RPC.TLSKeyFile }}"

##### peer to peer configuration options #####
[p2p]

@@ -283,29 +216,12 @@ recheck = {{ .Mempool.Recheck }}
broadcast = {{ .Mempool.Broadcast }}
wal_dir = "{{ js .Mempool.WalPath }}"

# Maximum number of transactions in the mempool
# size of the mempool
size = {{ .Mempool.Size }}

# Limit the total size of all txs in the mempool.
# This only accounts for raw transactions (e.g. given 1MB transactions and
# max_txs_bytes=5MB, mempool will only accept 5 transactions).
max_txs_bytes = {{ .Mempool.MaxTxsBytes }}

# Size of the cache (used to filter transactions we saw earlier) in transactions
# size of the cache (used to filter transactions we saw earlier)
cache_size = {{ .Mempool.CacheSize }}

# Maximum size of a single transaction.
# NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}.
max_tx_bytes = {{ .Mempool.MaxTxBytes }}

##### fast sync configuration options #####
[fastsync]

# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v1" - refactor of v0 version for better testability
version = "{{ .FastSync.Version }}"

##### consensus configuration options #####
[consensus]

@@ -336,8 +252,8 @@ peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
# What indexer to use for transactions
#
# Options:
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# 1) "null" (default)
# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
indexer = "{{ .TxIndex.Indexer }}"

# Comma-separated list of tags to index (by default the only tag is "tx.hash")
@@ -369,7 +285,7 @@ prometheus = {{ .Instrumentation.Prometheus }}
prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"

# Maximum number of simultaneous connections.
# If you want to accept a larger number than the default, make sure
# If you want to accept more significant number than the default, make sure
# you increase your OS limits.
# 0 - unlimited.
max_open_connections = {{ .Instrumentation.MaxOpenConnections }}
@@ -381,51 +297,53 @@ namespace = "{{ .Instrumentation.Namespace }}"

/****** these are for test settings ***********/

func ResetTestRoot(testName string) *Config {
    return ResetTestRootWithChainID(testName, "")
}

func ResetTestRootWithChainID(testName string, chainID string) *Config {
    // create a unique, concurrency-safe test directory under os.TempDir()
    rootDir, err := ioutil.TempDir("", fmt.Sprintf("%s-%s_", chainID, testName))
    if err != nil {
        panic(err)
    rootDir := os.ExpandEnv("$HOME/.tendermint_test")
    rootDir = filepath.Join(rootDir, testName)
    // Remove ~/.tendermint_test_bak
    if cmn.FileExists(rootDir + "_bak") {
        if err := os.RemoveAll(rootDir + "_bak"); err != nil {
            cmn.PanicSanity(err.Error())
        }
    }
    // ensure config and data subdirs are created
    if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), DefaultDirPerm); err != nil {
        panic(err)
    // Move ~/.tendermint_test to ~/.tendermint_test_bak
    if cmn.FileExists(rootDir) {
        if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
            cmn.PanicSanity(err.Error())
        }
    }
    if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), DefaultDirPerm); err != nil {
        panic(err)
    // Create new dir
    if err := cmn.EnsureDir(rootDir, 0700); err != nil {
        cmn.PanicSanity(err.Error())
    }
    if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
        cmn.PanicSanity(err.Error())
    }
    if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
        cmn.PanicSanity(err.Error())
    }

    baseConfig := DefaultBaseConfig()
    configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
    genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
    privKeyFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorKey)
    privStateFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorState)
    privFilePath := filepath.Join(rootDir, baseConfig.PrivValidator)

    // Write default config file if missing.
    if !cmn.FileExists(configFilePath) {
        writeDefaultConfigFile(configFilePath)
    }
    if !cmn.FileExists(genesisFilePath) {
        if chainID == "" {
            chainID = "tendermint_test"
        }
        testGenesis := fmt.Sprintf(testGenesisFmt, chainID)
        cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
    }
    // we always overwrite the priv val
    cmn.MustWriteFile(privKeyFilePath, []byte(testPrivValidatorKey), 0644)
    cmn.MustWriteFile(privStateFilePath, []byte(testPrivValidatorState), 0644)
    cmn.MustWriteFile(privFilePath, []byte(testPrivValidator), 0644)

    config := TestConfig().SetRoot(rootDir)
    return config
}

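How a test typically consumes this helper, mirroring the toml_test.go hunk further down (the test name is an arbitrary example; the RemoveAll cleanup matters on the v0.32.5 side, where each call creates a unique temp root):

func TestWithTestRoot(t *testing.T) {
    cfg := ResetTestRoot("example_test")
    defer os.RemoveAll(cfg.RootDir) // clean up the per-test root
    _ = cfg                         // ... exercise cfg here ...
}
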
var testGenesisFmt = `{
|
||||
"genesis_time": "2018-10-10T08:20:13.695936996Z",
|
||||
"chain_id": "%s",
|
||||
var testGenesis = `{
|
||||
"genesis_time": "2017-10-10T08:20:13.695936996Z",
|
||||
"chain_id": "tendermint_test",
|
||||
"validators": [
|
||||
{
|
||||
"pub_key": {
|
||||
@@ -439,7 +357,7 @@ var testGenesisFmt = `{
|
||||
"app_hash": ""
|
||||
}`
|
||||
|
||||
var testPrivValidatorKey = `{
|
||||
var testPrivValidator = `{
|
||||
"address": "A3258DCBF45DCA0DF052981870F2D1441A36D145",
|
||||
"pub_key": {
|
||||
"type": "tendermint/PubKeyEd25519",
|
||||
@@ -448,11 +366,8 @@ var testPrivValidatorKey = `{
|
||||
"priv_key": {
|
||||
"type": "tendermint/PrivKeyEd25519",
|
||||
"value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
|
||||
}
|
||||
}`
|
||||
|
||||
var testPrivValidatorState = `{
|
||||
"height": "0",
|
||||
"round": "0",
|
||||
"step": 0
|
||||
},
|
||||
"last_height": "0",
|
||||
"last_round": "0",
|
||||
"last_step": 0
|
||||
}`
|
||||
|
||||
@@ -48,7 +48,6 @@ func TestEnsureTestRoot(t *testing.T) {
|
||||
|
||||
// create root dir
|
||||
cfg := ResetTestRoot(testName)
|
||||
defer os.RemoveAll(cfg.RootDir)
|
||||
rootDir := cfg.RootDir
|
||||
|
||||
// make sure config is set properly
|
||||
@@ -61,7 +60,7 @@ func TestEnsureTestRoot(t *testing.T) {
|
||||
|
||||
// TODO: make sure the cfg returned and testconfig are the same!
|
||||
baseConfig := DefaultBaseConfig()
|
||||
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState)
|
||||
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidator)
|
||||
}
|
||||
|
||||
func checkConfig(configFile string) bool {
|
||||
|
||||

Some files were not shown because too many files have changed in this diff.