Compare commits


1 Commit

| Author | SHA1 | Message | Date |
| --- | --- | --- | --- |
| Callum Waters | f51eee14e7 | explicitly set block executor event bus | 2021-04-06 14:25:58 +02:00 |
750 changed files with 17377 additions and 49846 deletions

.github/CODEOWNERS (vendored, 15 changed lines)
View File

@@ -7,4 +7,17 @@
# global owners are only requested if there isn't a more specific
# codeowner specified below. For this reason, the global codeowners
# are often repeated in package-level definitions.
* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield @creachadair
* @alexanderbez @ebuchman @tessr
# Overrides for tooling packages
.github/ @marbar3778 @alexanderbez @ebuchman @tessr
DOCKER/ @marbar3778 @alexanderbez @ebuchman @tessr
# Overrides for core Tendermint packages
abci/ @marbar3778 @alexanderbez @ebuchman @tessr @tychoish
evidence/ @cmwaters @ebuchman @tessr @tychoish
light/ @cmwaters @ebuchman @tessr @tychoish
# Overrides for docs
*.md @marbar3778 @alexanderbez @ebuchman @tessr
docs/ @marbar3778 @alexanderbez @ebuchman @tessr

.github/codecov.yml (vendored, 14 changed lines)
View File

@@ -5,14 +5,19 @@ coverage:
status:
project:
default:
threshold: 20%
patch: off
threshold: 1%
patch: on
changes: off
github_checks:
annotations: false
comment: false
comment:
layout: "diff, files"
behavior: default
require_changes: no
require_base: no
require_head: yes
ignore:
- "docs"
@@ -20,6 +25,3 @@ ignore:
- "scripts"
- "**/*.pb.go"
- "libs/pubsub/query/query.peg.go"
- "*.md"
- "*.rst"
- "*.yml"

View File

@@ -2,9 +2,6 @@ name: Test Coverage
on:
pull_request:
push:
paths:
- "**.go"
- "!test/"
branches:
- master
- release/**
@@ -13,7 +10,7 @@ jobs:
split-test-files:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2
- name: Create a file with all the pkgs
run: go list ./... > pkgs.txt
- name: Split pkgs into 4 files
@@ -48,12 +45,11 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- name: install
@@ -71,12 +67,11 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
@@ -100,12 +95,11 @@ jobs:
runs-on: ubuntu-latest
needs: tests
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
"!test/"
go.mod
go.sum
- uses: actions/download-artifact@v2
@@ -127,7 +121,7 @@ jobs:
- run: |
cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt
if: env.GIT_DIFF
- uses: codecov/codecov-action@v2.1.0
- uses: codecov/codecov-action@v1.3.2
with:
file: ./coverage.txt
if: env.GIT_DIFF
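
Several of these jobs share one pattern: technote-space/get-diff-action records whether any Go files (or go.mod/go.sum) changed, and later steps are skipped via `if: env.GIT_DIFF` when nothing relevant changed. Here is a minimal self-contained sketch of that pattern; the workflow, job, and step names and the test command are illustrative, not taken from the diff.

```yaml
name: Coverage gating sketch      # illustrative name
on: [pull_request]
jobs:
  tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: technote-space/get-diff-action@v4
        with:
          PATTERNS: |
            **/**.go
            go.mod
            go.sum
      - name: Run tests
        run: go test ./...        # illustrative command
        # Skipped entirely when none of the patterns matched the changed files.
        if: env.GIT_DIFF
```

The same gating shows up again in the e2e and tests workflows further down in this comparison.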

View File

@@ -14,7 +14,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: Prepare
id: prep
run: |
@@ -40,17 +40,17 @@ jobs:
platforms: all
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
if: ${{ github.event_name != 'pull_request' }}
uses: docker/login-action@v1.10.0
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v2
with:
context: .
file: ./DOCKER/Dockerfile
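
The only functional difference in this hunk is how the Docker actions are referenced: one side pins exact releases (@v1.6.0, @v1.10.0, @v2.7.0) while the other tracks floating major tags (@v1, @v2). An illustrative fragment, with invented step names, showing the two styles side by side:

```yaml
# Illustrative only: the two pinning styles that differ between the two sides.
steps:
  - name: Publish (pinned to an exact release)
    uses: docker/build-push-action@v2.7.0   # reproducible; bumped manually
  - name: Publish (floating major tag)
    uses: docker/build-push-action@v2       # follows the action's v2 release line
```

Exact pins are reproducible and must be bumped by hand; major tags pick up the action's patch releases automatically.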

.github/workflows/docs.yml (vendored, new file, 32 lines)
View File

@@ -0,0 +1,32 @@
name: Documentation
# This job builds and deploys documentation to github pages.
# It runs on every push to master, and can be manually triggered.
on:
workflow_dispatch: # allow running workflow manually
push:
branches:
- master
jobs:
build-and-deploy:
runs-on: ubuntu-latest
container:
image: tendermintdev/docker-website-deployment
steps:
- name: Checkout 🛎️
uses: actions/checkout@v2.3.1
with:
persist-credentials: false
fetch-depth: 0
- name: Install and Build 🔧
run: |
apk add rsync
make build-docs
- name: Deploy 🚀
uses: JamesIves/github-pages-deploy-action@4.1.0
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: ~/output

View File

@@ -25,7 +25,7 @@ jobs:
with:
go-version: '1.16'
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2
with:
ref: 'v0.34.x'
@@ -49,7 +49,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
@@ -65,7 +65,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal

View File

@@ -16,8 +16,7 @@ jobs:
strategy:
fail-fast: false
matrix:
p2p: ['legacy', 'new', 'hybrid']
group: ['00', '01']
group: ['00', '01', '02', '03']
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
@@ -25,21 +24,21 @@ jobs:
with:
go-version: '1.16'
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2
- name: Build
working-directory: test/e2e
# Run make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker generator runner tests
run: make -j2 docker generator runner
- name: Generate testnets
working-directory: test/e2e
# When changing -g, also change the matrix groups above
run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }}
run: ./build/generator -g 4 -d networks/nightly
- name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }}
- name: Run testnets in group ${{ matrix.group }}
working-directory: test/e2e
run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml
run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml
e2e-nightly-fail-2:
needs: e2e-nightly-test-2
@@ -47,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on failure
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal
@@ -63,7 +62,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack on success
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal

View File

@@ -17,8 +17,8 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: '1.16'
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -28,7 +28,7 @@ jobs:
- name: Build
working-directory: test/e2e
# Run two make jobs in parallel, since we can't run steps in parallel.
run: make -j2 docker runner tests
run: make -j2 docker runner
if: "env.GIT_DIFF != ''"
- name: Run CI testnet

View File

@@ -17,20 +17,15 @@ jobs:
with:
go-version: '1.16'
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@v2
- name: Install go-fuzz
working-directory: test/fuzz
run: go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-build
- name: Fuzz mempool-v1
- name: Fuzz mempool
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v1
continue-on-error: true
- name: Fuzz mempool-v0
working-directory: test/fuzz
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool-v0
run: timeout -s SIGINT --preserve-status 10m make fuzz-mempool
continue-on-error: true
- name: Fuzz p2p-addrbook
@@ -81,7 +76,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Notify Slack if any crashers
uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7
uses: rtCamp/action-slack-notify@f565a63638bd3615e76249bffab00fcb9dab90f7
env:
SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }}
SLACK_CHANNEL: tendermint-internal

View File

@@ -10,7 +10,7 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 3
steps:
- uses: styfle/cancel-workflow-action@0.9.1
- uses: styfle/cancel-workflow-action@0.8.0
with:
workflow_id: 1041851,1401230,2837803
access_token: ${{ github.token }}

View File

@@ -46,7 +46,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout the Jepsen repository
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2
with:
repository: 'tendermint/jepsen'

View File

@@ -6,7 +6,7 @@ jobs:
markdown-link-check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
- uses: actions/checkout@master
- uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
with:
folder-path: "docs"

View File

@@ -13,8 +13,8 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 8
steps:
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go

View File

@@ -11,7 +11,6 @@ on:
branches: [master]
paths:
- "**.md"
- "**.yml"
jobs:
build:
@@ -19,7 +18,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout Code
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2
- name: Lint Code Base
uses: docker://github/super-linter:v3
env:
@@ -28,5 +27,5 @@ jobs:
DEFAULT_BRANCH: master
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
VALIDATE_MD: true
VALIDATE_OPENAPI: true
VALIDATE_OPAENAPI: true
VALIDATE_YAML: true

View File

@@ -16,7 +16,7 @@ jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: Prepare
id: prep
run: |
@@ -34,16 +34,16 @@ jobs:
echo ::set-output name=tags::${TAGS}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1.6.0
uses: docker/setup-buildx-action@v1
- name: Login to DockerHub
uses: docker/login-action@v1.10.0
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Publish to Docker Hub
uses: docker/build-push-action@v2.7.0
uses: docker/build-push-action@v2
with:
context: ./tools/proto
file: ./tools/proto/Dockerfile

View File

@@ -11,13 +11,13 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: lint
run: make proto-lint
proto-breakage:
runs-on: ubuntu-latest
timeout-minutes: 4
steps:
- uses: actions/checkout@v2.3.4
- uses: actions/checkout@master
- name: check-breakage
run: make proto-check-breaking-ci

View File

@@ -2,7 +2,7 @@ name: "Release"
on:
push:
branches:
branches:
- "RC[0-9]/**"
tags:
- "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10
@@ -12,7 +12,7 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2.3.4
uses: actions/checkout@v2
with:
fetch-depth: 0
@@ -20,6 +20,9 @@ jobs:
with:
go-version: '1.16'
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
if: startsWith(github.ref, 'refs/tags/')
- name: Build
uses: goreleaser/goreleaser-action@v2
if: ${{ github.event_name == 'pull_request' }}
@@ -32,6 +35,6 @@ jobs:
if: startsWith(github.ref, 'refs/tags/')
with:
version: latest
args: release --rm-dist
args: release --rm-dist --release-notes=../release_notes.md
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
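
The added lines wire GoReleaser up to release notes: on tag pushes, a small file pointing at the CHANGELOG section for that tag is generated and passed via `--release-notes`. A sketch of just those two steps; the second step's name is assumed, and the surrounding checkout/setup-go steps are omitted.

```yaml
# Write a pointer to the CHANGELOG section for the pushed tag, then hand it to GoReleaser.
- run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md
  if: startsWith(github.ref, 'refs/tags/')
- name: Release              # step name assumed
  uses: goreleaser/goreleaser-action@v2
  if: startsWith(github.ref, 'refs/tags/')
  with:
    version: latest
    args: release --rm-dist --release-notes=../release_notes.md
  env:
    GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```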

View File

@@ -7,14 +7,12 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@v4
- uses: actions/stale@v3
with:
repo-token: ${{ secrets.GITHUB_TOKEN }}
stale-pr-message: "This pull request has been automatically marked as stale because it has not had
recent activity. It will be closed if no further activity occurs. Thank you
for your contributions."
days-before-stale: -1
days-before-close: -1
days-before-pr-stale: 10
days-before-pr-close: 4
days-before-stale: 10
days-before-close: 4
exempt-pr-labels: "S:wip"

View File

@@ -18,8 +18,8 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
@@ -28,7 +28,7 @@ jobs:
- name: install
run: make install install_abci
if: "env.GIT_DIFF != ''"
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
@@ -36,12 +36,44 @@ jobs:
${{ runner.os }}-go-
if: env.GIT_DIFF
# Cache binaries for use by other jobs
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.4
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
test_abci_apps:
runs-on: ubuntu-latest
needs: build
timeout-minutes: 5
steps:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.4
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
if: env.GIT_DIFF
- name: test_abci_apps
run: abci/tests/test_app/test.sh
shell: bash
if: env.GIT_DIFF
test_abci_cli:
runs-on: ubuntu-latest
needs: build
@@ -50,21 +82,21 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.4
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
@@ -81,21 +113,21 @@ jobs:
- uses: actions/setup-go@v2
with:
go-version: "1.16"
- uses: actions/checkout@v2.3.4
- uses: technote-space/get-diff-action@v5
- uses: actions/checkout@v2
- uses: technote-space/get-diff-action@v4
with:
PATTERNS: |
**/**.go
go.mod
go.sum
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.4
with:
path: ~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
if: env.GIT_DIFF
- uses: actions/cache@v2.1.6
- uses: actions/cache@v2.1.4
with:
path: ~/go/bin
key: ${{ runner.os }}-${{ github.sha }}-tm-binary
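
The build job and the test jobs coordinate through an actions/cache entry keyed on the commit SHA: `build` runs `make install install_abci` and caches `~/go/bin`, and jobs that declare `needs: build` restore the same key instead of recompiling. A condensed sketch of that pattern; the workflow name, `on:` trigger, and Go setup are assumed boilerplate.

```yaml
name: Tests sketch            # illustrative
on: [pull_request]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.16"
      - uses: actions/checkout@v2
      - run: make install install_abci
      - uses: actions/cache@v2.1.4
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary   # populated by this job
  test_abci_apps:
    runs-on: ubuntu-latest
    needs: build              # ensures the binaries are cached before this job starts
    steps:
      - uses: actions/setup-go@v2
        with:
          go-version: "1.16"
      - uses: actions/checkout@v2
      - uses: actions/cache@v2.1.4
        with:
          path: ~/go/bin
          key: ${{ runner.os }}-${{ github.sha }}-tm-binary   # same key, so the binaries are restored
      - run: abci/tests/test_app/test.sh
        shell: bash
```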

.gitignore (vendored, 3 changed lines)
View File

@@ -15,7 +15,7 @@
.vagrant
.vendor-new/
.vscode/
abci/abci-cli
abci-cli
addrbook.json
artifacts/*
build/*
@@ -24,7 +24,6 @@ docs/.vuepress/dist
docs/_build
docs/dist
docs/node_modules/
docs/spec
index.html.md
libs/pubsub/query/fuzz_test/output
profile\.out

View File

@@ -1,17 +1,14 @@
linters:
enable:
- asciicheck
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
# - funlen
# - gochecknoglobals
# - gochecknoinits
# - gocognit
- goconst
- gocritic
# - gocyclo
@@ -25,11 +22,11 @@ linters:
- ineffassign
# - interfacer
- lll
# - maligned
- misspell
# - maligned
- nakedret
- nolintlint
- prealloc
- scopelint
- staticcheck
- structcheck
- stylecheck
@@ -40,6 +37,8 @@ linters:
- varcheck
# - whitespace
# - wsl
# - gocognit
- nolintlint
issues:
exclude-rules:
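
The hunk above also loses its indentation in this capture. For orientation, a minimal .golangci.yml sketch showing the shape of the block being edited, with only a few of the linters repeated and the `issues` section stubbed out.

```yaml
# Sketch only: a handful of the linters from the hunk, in the file's usual layout.
linters:
  enable:
    - bodyclose
    - errcheck
    - goconst
    - misspell
    - staticcheck
    - stylecheck
    # - whitespace        # commented-out entries stay listed but disabled
issues:
  exclude-rules: []       # project-specific exclusions follow in the real file
```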

View File

@@ -1,255 +1,5 @@
# Changelog
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
## v0.35
Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis,
@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua
### BREAKING CHANGES
- CLI/RPC/Config
- [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez)
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] \#5728 `fastsync.version = "v1"` is no longer supported (@melekes)
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
- [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106)
- [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters)
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [blocksync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)
- [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated.
- [blocksync/v2] \#6730 Fast Sync v2 is deprecated, please use v0
- [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents.
- [rpc/jsonrpc/server] \#6785 `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield)
- [rpc] \#6820 Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users.
- [cli] \#6854 Remove deprecated snake case commands. (@tychoish)
- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield)
- [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`.
- It is not required any longer to set ldflags to set version strings
- [abci/counter] \#6684 Delete counter example app
- Go API
- [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez)
- [p2p] \#6618 \#6583 Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish)
- [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish)
- [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish)
- [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez)
- [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish)
- [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez)
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
- [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker)
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
- [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- [all] \#6077 Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blocksync v2
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
- [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder
- [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi)
- [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package.
Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself
and `TxInfo`. (@alexanderbez)
- [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning)
- [types] \#6627 Move `NodeKey` to types to make the type public.
- [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadOrGenNodeKeyID`
- [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters)
- Data Storage
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
- [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish)
- [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106)
- Tooling
- [tools] \#6498 Set OS home dir instead of the hardcoded PATH. (@JayT106)
- [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106)
### FEATURES
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters)
- [pex] \#6305 v2 pex reactor with backwards compatibility. Introduces two new pex messages to
accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer
exchange reactors behave the same. (@cmwaters)
- [crypto] \#6376 Enable sr25519 as a validator key type
- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez)
- `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of
the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index.
- Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the
`mempool.version` configuration, where `v1` is the default configuration.
- Applications that do not specify a priority, i.e. zero, will have transactions reaped by the order in which they are received by the node.
- Transactions are gossiped in FIFO order as they are in `v0`.
- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106)
- [blocksync/event] \#6619 Emit blocksync status event when switching consensus/blocksync (@JayT106)
- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106)
- [inspect] \#6785 Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield)
### IMPROVEMENTS
- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish)
- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [types] \#6478 Add `block_id` to `newblock` event (@jeebster)
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning)
- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blocksync/v1] \#5728 Remove blocksync v1 (@melekes)
- [blocksync/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blocksync/v2] \#5774 Send status request when new peer joins (@melekes)
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
- [consensus] \#5987 and \#5792 Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon)
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
- [types] \#6120 use batch verification for verifying commits signatures.
- If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature.
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
- [privval] \#6240 Add `context.Context` to privval interface.
- [rpc] \#6265 set cache control in http-rpc response header (@JayT106)
- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots.
- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106)
- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm)
- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778)
- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm)
- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778)
- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters)
- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106)
- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs)
- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
- [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
### BUG FIXES
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
- [rpc] \#6507 Ensure RPC client can handle URLs without ports (@JayT106)
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [blocksync] \#6590 Update the metrics during blocksync (@JayT106)
## v0.34.13
*September 6, 2021*
This release backports improvements to state synchronization and ABCI
performance under concurrent load, and the PostgreSQL event indexer.
### IMPROVEMENTS
- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters)
- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish)
- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair)
## v0.34.12
*August 17, 2021*
Special thanks to external contributors on this release: @JayT106.
### FEATURES
- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce
`/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish)
### IMPROVEMENTS
- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez)
### BUG FIXES
- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug
with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters)
- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106)
## v0.34.11
*June 18, 2021*
This release improves the robustness of statesync; tweaking channel priorities and timeouts and
adding two new parameters to the state sync config.
### BREAKING CHANGES
- Apps
- [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer.
### IMPROVEMENTS
- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez)
- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. (@tychoish)
- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters)
### BUG FIXES
- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters)
## v0.34.10
*April 14, 2021*
This release fixes a bug where peers would sometimes try to send messages
on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing
this issue!
- [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters)
- [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters)
## v0.34.9
*April 8, 2021*
This release fixes a moderate severity security issue, Security Advisory Alderfly,
which impacts all networks that rely on Tendermint light clients.
Further details will be released once networks have upgraded.
This release also includes a small Go API-breaking change, to reduce panics in the RPC layer.
Special thanks to our external contributors on this release: @gchaincl
### BREAKING CHANGES
- Go API
- [rpc/jsonrpc/server] [\#6204](https://github.com/tendermint/tendermint/issues/6204) Modify `WriteRPCResponseHTTP(Error)` to return an error (@melekes)
### FEATURES
- [rpc] [\#6226](https://github.com/tendermint/tendermint/issues/6226) Index block events and expose a new RPC method, `/block_search`, to allow querying for blocks by `BeginBlock` and `EndBlock` events (@alexanderbez)
### BUG FIXES
- [rpc/jsonrpc/server] [\#6191](https://github.com/tendermint/tendermint/issues/6191) Correctly unmarshal `RPCRequest` when data is `null` (@melekes)
- [p2p] [\#6289](https://github.com/tendermint/tendermint/issues/6289) Fix "unknown channels" bug on CustomReactors (@gchaincl)
- [light/evidence] Adds logic to handle forward lunatic attacks (@cmwaters)
## v0.34.8
*February 25, 2021*
@@ -257,6 +7,8 @@ Special thanks to our external contributors on this release: @gchaincl
This release, in conjunction with [a fix in the Cosmos SDK](https://github.com/cosmos/cosmos-sdk/pull/8641),
introduces changes that should mean the logs are much, much quieter. 🎉
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### IMPROVEMENTS
- [libs/log] [\#6174](https://github.com/tendermint/tendermint/issues/6174) Include timestamp (`ts` field; `time.RFC3339Nano` format) in JSON logger output (@melekes)
@@ -294,13 +46,15 @@ use remote signer implementations instead of `FilePV` in production.
Thank you to @joe-bowman for his assistance with this vulnerability and a particular
shout-out to @marbar3778 for diagnosing it quickly.
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
- [consensus] [\#6128](https://github.com/tendermint/tendermint/pull/6128) Remove privValidator from log call (@tessr)
## v0.34.6
*February 18, 2021*
*February 18, 2021*
_Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._
@@ -308,29 +62,33 @@ _Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling p
*February 11, 2021*
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!
This release includes a fix for a memory leak in the evidence reactor (see #6068, below).
All Tendermint clients are recommended to upgrade.
Thank you to our friends at Crypto.com for the initial report of this memory leak!
Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3!
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
- [light] [\#6022](https://github.com/tendermint/tendermint/pull/6022) Fix a bug when the number of validators equals 100 (@melekes)
- [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters)
- [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes)
## v0.34.3
## v0.34.3
*January 19, 2021*
This release includes a fix for a high-severity security vulnerability,
This release includes a fix for a high-severity security vulnerability,
a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg)
or https://nvd.nist.gov/vuln/detail/CVE-2021-21271.
Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
https://nvd.nist.gov/vuln/detail/CVE-2021-3121.
Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint).
### BUG FIXES
@@ -345,6 +103,8 @@ This release fixes a substantial bug in evidence handling where evidence could
sometimes be broadcast before the block containing that evidence was fully committed,
resulting in some nodes panicking when trying to verify said evidence.
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- Go API
@@ -368,6 +128,8 @@ disconnecting from this node. As a temporary remedy (until the mempool package
is refactored), the `max-batch-bytes` was disabled. Transactions will be sent
one by one without batching.
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- CLI/RPC/Config
@@ -396,6 +158,8 @@ Holy smokes, this is a big one! For a more reader-friendly overview of the chang
Special thanks to external contributors on this release: @james-ray, @fedekunze, @favadi, @alessio,
@joe-bowman, @cuonglm, @SadPencil and @dongsam.
And as always, friendly reminder, that we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES
- CLI/RPC/Config
@@ -422,14 +186,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778)
- [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778)
- [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool to Protobuf encoding (@marbar3778)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
- [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes)
- `MaxBatchBytes` new config setting defines the max size of one batch.
- [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778)
- [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778)
- Blockchain Protocol
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
- [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778)
- [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supersedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters)
- [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker)
- [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes)
@@ -488,7 +252,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778)
- [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes)
- [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778)
- [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778)
@@ -526,7 +290,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze,
- [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters)
- [evidence] [\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters)
- [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778)
- [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes)
- [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes)
- [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters)
@@ -636,6 +400,9 @@ as 2/3+ of the signatures are checked._
Special thanks to @njmurarka at Bluzelle Networks for reporting this.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
- [consensus] Do not allow signatures for a wrong block in commits (@ebuchman)
@@ -651,6 +418,8 @@ need to update your code.**
Special thanks to external contributors on this release: @tau3,
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -710,6 +479,8 @@ Special thanks to external contributors on this release: @tau3,
Special thanks to external contributors on this release: @whylee259, @greg-szabo
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -796,6 +567,9 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -808,6 +582,8 @@ and reporting this.
Special thanks to external contributors on this release:
@antho1404, @michaelfig, @gterzian, @tau3, @Shivani912
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- CLI/RPC/Config
@@ -858,6 +634,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@princesinha19
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### FEATURES:
- [rpc] [\#3333](https://github.com/tendermint/tendermint/issues/3333) Add `order_by` to `/tx_search` endpoint, allowing to change default ordering from asc to desc (@princesinha19)
@@ -876,6 +655,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release: @mrekucci, @PSalant726, @princesinha19, @greg-szabo, @dongsam, @cuonglm, @jgimeno, @yenkhoon
Friendly reminder, we have a [bug bounty
program.](https://hackerone.com/tendermint).
*January 14, 2020*
This release contains breaking changes to the `Block#Header`, specifically
@@ -1104,6 +886,9 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr)
@@ -1115,6 +900,9 @@ _January, 9, 2020_
Special thanks to external contributors on this release: @greg-szabo, @gregzaitsev, @yenkhoon
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### FEATURES:
- [rpc/lib] [\#4248](https://github.com/tendermint/tendermint/issues/4248) RPC client basic authentication support (@greg-szabo)
@@ -1136,6 +924,9 @@ Special thanks to external contributors on this release: @greg-szabo, @gregzaits
Special thanks to external contributors on this release: @erikgrinaker, @guagualvcha, @hsyis, @cosmostuba, @whunmr, @austinabell
Friendly reminder, we have a [bug bounty
program.](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -1175,6 +966,9 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -1201,6 +995,9 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Only allow ed25519 pubkeys when connecting
@@ -1216,6 +1013,9 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
- [p2p] [\#4030](https://github.com/tendermint/tendermint/issues/4030) Fix for panic on nil public key send to a peer
@@ -1226,6 +1026,9 @@ and reporting this issue.
Special thanks to external contributors on this release: @jon-certik, @gracenoah, @PSalant726, @gchaincl
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- CLI/RPC/Config
@@ -1261,6 +1064,9 @@ guide.
Special thanks to external contributors on this release:
@gchaincl, @bluele, @climber73
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [consensus] [\#3839](https://github.com/tendermint/tendermint/issues/3839) Reduce "Error attempting to add vote" message severity (Error -> Info)
@@ -1281,6 +1087,9 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@ruseinov, @bluele, @guagualvcha
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -1320,6 +1129,9 @@ This release contains a minor enhancement to the ABCI and some breaking changes
- CheckTx requests include a `CheckTxType` enum that can be set to `Recheck` to indicate to the application that this transaction was already checked/validated and certain expensive operations (like checking signatures) can be skipped
- Removed various functions from `libs` pkgs
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
- Go API
@@ -1365,6 +1177,9 @@ and the RPC, namely:
[docs](https://github.com/tendermint/tendermint/blob/60827f75623b92eff132dc0eff5b49d2025c591e/docs/spec/abci/abci.md#events)
- Bind RPC to localhost by default, not to the public interface [UPGRADING/RPC_Changes](./UPGRADING.md#rpc_changes)
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
* CLI/RPC/Config
@@ -1465,6 +1280,8 @@ Notes:
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding
and reporting this.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
@@ -1485,6 +1302,8 @@ identified and fixed here.
Special thanks to [elvishacker](https://hackerone.com/elvishacker) for finding
and reporting this.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -1512,6 +1331,8 @@ accepting new peers and only allowing `ed25519` pubkeys.
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for pointing
this out.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
@@ -1528,6 +1349,8 @@ All clients are recommended to upgrade. See
Special thanks to [fudongbai](https://hackerone.com/fudongbai) for discovering
and reporting this issue.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### SECURITY:
@@ -1823,6 +1646,8 @@ See the [v0.31.0
Milestone](https://github.com/tendermint/tendermint/milestone/19?closed=1) for
more details.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -2043,6 +1868,8 @@ This release contains two important fixes: one for p2p layer where we sometimes
were not closing connections and one for consensus layer where consensus with
no empty blocks (`create_empty_blocks = false`) could halt.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [pex] [\#3037](https://github.com/tendermint/tendermint/issues/3037) Only log "Reached max attempts to dial" once
@@ -2082,6 +1909,8 @@ While we are trying to stabilize the Block protocol to preserve compatibility
with old chains, there may be some final changes yet to come before Cosmos
launch as we continue to audit and test the software.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -2130,6 +1959,8 @@ launch as we continue to audit and test the software.
Special thanks to external contributors on this release:
@HaoyangLiu
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BUG FIXES:
- [consensus] Fix consensus halt from proposing blocks with too much evidence
@@ -2258,6 +2089,8 @@ Special thanks to @dlguddus for discovering a [major
issue](https://github.com/tendermint/tendermint/issues/2718#issuecomment-440888677)
in the proposer selection algorithm.
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
This release is primarily about fixes to the proposer selection algorithm
in preparation for the [Cosmos Game of
@@ -2320,6 +2153,8 @@ Special thanks to external contributors on this release:
@ackratos, @goolAdapter, @james-ray, @joe-bowman, @kostko,
@nagarajmanjunath, @tomtau
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### FEATURES:
@@ -2359,6 +2194,8 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release:
@danil-lashin, @kevlubkcm, @krhubert, @srmo
Friendly reminder, we have a [bug bounty
program](https://hackerone.com/tendermint).
### BREAKING CHANGES:
@@ -2403,6 +2240,8 @@ Special thanks to external contributors on this release:
Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### FEATURES:
- [rpc] [\#2582](https://github.com/tendermint/tendermint/issues/2582) Enable CORS on RPC API (@hleb-albau)
@@ -2420,6 +2259,8 @@ Special thanks to external contributors on this release: @hleb-albau, @zhuzeyu
Special thanks to external contributors on this release: @katakonst
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
### IMPROVEMENTS:
- [consensus] [\#2704](https://github.com/tendermint/tendermint/issues/2704) Simplify valid POL round logic
@@ -2593,6 +2434,8 @@ It also addresses some issues found via security audit, removes various unused
functions from `libs/common`, and implements
[ADR-012](https://github.com/tendermint/tendermint/blob/develop/docs/architecture/adr-012-peer-transport.md).
Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint).
BREAKING CHANGES:
* CLI/RPC/Config

View File

@@ -9,18 +9,83 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
### BREAKING CHANGES
- CLI/RPC/Config
- [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker)
- [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes)
- [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes)
- [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters)
- [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters)
- [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes)
- [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters)
- Apps
- [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface
- [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`.
- [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters.
- P2P Protocol
- Go API
- [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes)
- [p2p] Removed unused function `MakePoWTarget`. (@erikgrinaker)
- [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes)
- [proto/p2p] Renamed `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker)
- [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker)
- [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio)
- [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters)
- [state] \#5864 Use an iterator when pruning state (@cmwaters)
- [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`.
- Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic.
- [light] \#6054 Move `MaxRetryAttempt` option from client to provider.
- `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters)
- [all] \#6077 Change spelling from British English to American (@cmwaters)
- Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub
- Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2
- [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes)
- [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes)
- [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes)
- [rpc/jsonrpc/server] \#6204 Modify `WriteRPCResponseHTTP(Error)` to return an error (@melekes)
- Blockchain Protocol
- Data Storage
- [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters)
### FEATURES
- [rpc] \#6226 Index block events and expose a new RPC method, `/block_search`, to allow querying for blocks by `BeginBlock` and `EndBlock` events. (@alexanderbez)
- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam
### IMPROVEMENTS
- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778)
- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys.
- [privval] \#5725 Add gRPC support to private validator.
- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778)
- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes)
- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes)
- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778)
- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes)
- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes)
- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes)
- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes)
- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon)
- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands)
- [consensus] \#5987 Remove `time_iota_ms` from consensus params. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778)
- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778)
- `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk.
- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes)
- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis)
- [state] \#6067 Batch save state data (@githubsands & @cmwaters)
- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. (@marbar3778)
- [types] \#6120 use batch verification for verifying commits signatures.
- If the key type supports the batch verification API, Tendermint will try to batch verify; if batch verification fails, each signature is verified individually (see the sketch after this list).
- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint.
- [privval] \#6240 Add `context.Context` to privval interface.
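The batch-verification entries above describe a try-batch-then-fall-back pattern rather than a concrete API. A minimal sketch of that pattern against a hypothetical `BatchVerifier` interface (the type names and signatures here are illustrative assumptions, not the exact Tendermint crypto types):
```go
package commitverify

// BatchVerifier is a hypothetical stand-in for a batch verification API.
type BatchVerifier interface {
	Add(pubKey, msg, sig []byte) error
	Verify() bool // true only if every added signature is valid
}

// Entry is one (public key, message, signature) triple from a commit.
type Entry struct {
	PubKey, Msg, Sig []byte
}

// VerifyCommit tries batch verification first; if the batch as a whole fails
// (or a signature cannot even be added), it falls back to verifying each
// signature individually so invalid signatures are rejected precisely.
func VerifyCommit(bv BatchVerifier, single func(pubKey, msg, sig []byte) bool, entries []Entry) bool {
	useBatch := bv != nil
	for _, e := range entries {
		if !useBatch {
			break
		}
		if err := bv.Add(e.PubKey, e.Msg, e.Sig); err != nil {
			useBatch = false
		}
	}
	if useBatch && bv.Verify() {
		return true
	}
	// Fallback: single verification of every signature.
	for _, e := range entries {
		if !single(e.PubKey, e.Msg, e.Sig) {
			return false
		}
	}
	return true
}
```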
### BUG FIXES
- [types] \#5523 Change json naming of `PartSetHeader` within `BlockID` from `parts` to `part_set_header` (@marbar3778)
- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash)
- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes)
- [blockchain/v1] \#5711 Fix deadlock (@melekes)
- [rpc/jsonrpc/server] \#6191 Correctly unmarshal `RPCRequest` when data is `null` (@melekes)

View File

@@ -26,8 +26,7 @@ will indicate their support with a heartfelt emoji.
If the issue would benefit from thorough discussion, maintainers may
request that you create a [Request For
Comment](https://github.com/tendermint/spec/tree/master/rfc)
in the Tendermint spec repo. Discussion
Comment](https://github.com/tendermint/spec/tree/master/rfc). Discussion
at the RFC stage will build collective understanding of the dimensions
of the problems and help structure conversations around trade-offs.
@@ -227,96 +226,16 @@ Fixes #nnnn
Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though!
### Release procedure
### Release Procedure
#### A note about backport branches
Tendermint's `master` branch is under active development.
Releases are specified using tags and are built from long-lived "backport" branches.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch,
and the backport branches have names like `v0.34.x` or `v0.33.x`
(literally, `x`; it is not a placeholder in this case).
As non-breaking changes land on `master`, they should also be backported (cherry-picked)
to these backport branches.
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport
to the needed branch. There should be a label for any backport branch that you'll be targeting.
To notify the bot to backport a pull request, mark the pull request with
the label `S:backport-to-<backport_branch>`.
Once the original pull request is merged, the bot will try to cherry-pick the pull request
to the backport branch. If the bot fails to backport, it will open a pull request.
The author of the original pull request is responsible for solving the conflicts and
merging the pull request.
#### Creating a backport branch
If this is the first release candidate for a major release, you get to have the honor of creating
the backport branch!
Note that, after creating the backport branch, you'll also need to update the tags on `master`
so that `go mod` is able to order the branches correctly. You should tag `master` with a "dev" tag
that is "greater than" the backport branches' tags. See #6072 for more context.
In the following example, we'll assume that we're making a backport branch for
the 0.35.x line.
1. Start on `master`
2. Create the backport branch:
`git checkout -b v0.35.x`
3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up:
`git tag -a v0.36.0-dev; git push origin v0.36.0-dev`
4. Create a new workflow to run the e2e nightlies for this backport branch.
(See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-34x.yml
for an example.)
#### Release candidates
Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of backport branches.
Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end
(for example, `v0.35.0-rc0`).
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)
If this is the first RC for a major release, you'll have to make a new backport branch (see above).
Otherwise:
1. Start from the backport branch (e.g. `v0.35.x`).
1. Run the integration tests and the e2e nightlies
(which can be triggered from the Github UI;
e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml).
1. Prepare the changelog:
- Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`.
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all PRs
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
or other upgrading flows.
- Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
1. Open a PR with these changes against the backport branch.
1. Once these changes have landed on the backport branch, be sure to pull them back down locally.
2. Once you have the changes locally, create the new tag, specifying a name and a tag "message":
`git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"`
3. Push the tag back up to origin:
`git push origin v0.35.0-rc0`
Now the tag should be available on the repo's releases page.
4. Future RCs will continue to be built off of this branch.
Note that this process should only be used for "true" RCs--
release candidates that, if successful, will be the next release.
For more experimental "RCs," create a new, short-lived branch and tag that instead.
#### Major release
#### Major Release
This major release process assumes that this release was preceded by release candidates.
If there were no release candidates, begin by creating a backport branch, as described above.
If there were no release candidates, and you'd like to cut a major release directly from master, see below.
1. Start on the backport branch (e.g. `v0.35.x`)
2. Run integration tests and the e2e nightlies.
3. Prepare the release:
1. Start on the latest RC branch (`RCx/vX.X.0`).
2. Run integration tests.
3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release:
- "Squash" changes from the changelog entries for the RCs into a single entry,
and add all changes included in `CHANGELOG_PENDING.md`.
(Squashing includes both combining all entries, as well as removing or simplifying
@@ -325,27 +244,59 @@ If there were no release candidates, begin by creating a backport branch, as des
all PRs
- Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes
or other upgrading flows.
- Bump TMVersionDefault version in `version.go`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
4. Open a PR with these changes against the backport branch.
5. Once these changes are on the backport branch, push a tag with prepared release details.
This will trigger the actual release `v0.35.0`.
- `git tag -a v0.35.0 -m 'Release v0.35.0'`
- `git push origin v0.35.0`
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`).
5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch:
- `git checkout RCx/vX.X.0`
- `git checkout -b release/vX.X.0`
6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`.
- `git tag -a vX.X.0 -m 'Release vX.X.0'`
- `git push origin vX.X.0`
7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`.
8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this
new major release series.
#### Minor release (point releases)
##### Major Release (from `master`)
1. Start on `master`
2. Run integration tests (see `test_integrations` in Makefile)
3. Prepare release in a pull request against `master` (to be squash merged):
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release
had release candidates, squash all the RC updates into one
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for
all issues
- Run `bash ./scripts/authors.sh` to get a list of authors since the latest
release, and add the github aliases of external contributors to the top of
the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh <email>`
- Reset the `CHANGELOG_PENDING.md`
- Bump P2P and block protocol versions in `version.go`, if necessary
- Bump ABCI protocol version in `version.go`, if necessary
- Make sure all significant breaking changes are covered in `UPGRADING.md`
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Push a tag with prepared release details (this will trigger the release `vX.X.0`)
- `git tag -a vX.X.x -m 'Release vX.X.x'`
- `git push origin vX.X.x`
5. Update the `CHANGELOG.md` file on master with the release's changelog.
6. Delete any RC branches and tags for this release (if applicable)
#### Minor Release (Point Releases)
Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master.
Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and
the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case).
As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches.
We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. Depending on which backport branch you need to backport to there will be labels for them. To notify the bot to backport a pull request, mark the pull request with the label `backport-to-<backport_branch>`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request.
Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate.
To create a minor release:
1. Checkout the long-lived backport branch: `git checkout v0.35.x`
2. Run integration tests (`make test_integrations`) and the nightlies.
1. Checkout the long-lived backport branch: `git checkout vX.X.x`
2. Run integration tests: `make test_integrations`
3. Check out a new branch and prepare the release:
- Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`
- Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues
@@ -354,14 +305,35 @@ To create a minor release:
- Bump the ABCI version number, if necessary.
(Note that ABCI follows semver, and that ABCI versions are the only versions
which can change during minor releases, and only field additions are valid minor changes.)
4. Open a PR with these changes that will land them back on `v0.35.x`
- Add any release notes you would like to be added to the body of the release to `release_notes.md`.
4. Open a PR with these changes that will land them back on `vX.X.x`
5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag.
- `git tag -a v0.35.1 -m 'Release v0.35.1'`
- `git push origin v0.35.1`
- `git tag -a vX.X.x -m 'Release vX.X.x'`
- `git push origin vX.X.x`
6. Create a pull request back to master with the CHANGELOG & version changes from the latest release.
- Remove all `R:minor` labels from the pull requests that were included in the release.
- Do not merge the backport branch into master.
#### Release Candidates
Before creating an official release, especially a major release, we may want to create a
release candidate (RC) for our friends and partners to test out. We use git tags to
create RCs, and we build them off of RC branches. RC branches typically have names formatted
like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow
the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`).
(Note that branches and tags _cannot_ have the same names, so it's important that these branches
have distinct names from the tags/release names.)
1. Start from the RC branch (e.g. `RC0/v0.34.0`).
2. Create the new tag, specifying a name and a tag "message":
`git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0"`
3. Push the tag back up to origin:
`git push origin v0.34.0-rc0`
Now the tag should be available on the repo's releases page.
4. Create a new release candidate branch for any possible updates to the RC:
`git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0`
## Testing
### Unit tests

View File

@@ -1,5 +1,5 @@
# stage 1 Generate Tendermint Binary
FROM golang:1.16-alpine as builder
FROM golang:1.15-alpine as builder
RUN apk update && \
apk upgrade && \
apk --no-cache add make

View File

@@ -9,7 +9,7 @@ RUN wget http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm &
RUN yum -y groupinstall "Development Tools"
RUN yum -y install leveldb-devel which
ENV GOVERSION=1.16.5
ENV GOVERSION=1.12.9
RUN cd /tmp && \
wget https://dl.google.com/go/go${GOVERSION}.linux-amd64.tar.gz && \

View File

@@ -31,7 +31,7 @@ To get started developing applications, see the [application developers guide](h
A quick example of a built-in app and Tendermint core in one container.
```sh
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init validator
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init
docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint start --proxy-app=kvstore
```

View File

@@ -12,7 +12,7 @@ else
VERSION := $(shell git describe)
endif
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMVersion=$(VERSION)
LD_FLAGS = -X github.com/tendermint/tendermint/version.TMCoreSemVer=$(VERSION)
BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)"
HTTPS_GIT := https://github.com/tendermint/tendermint.git
DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf
@@ -202,7 +202,7 @@ format:
lint:
@echo "--> Running linter"
go run github.com/golangci/golangci-lint/cmd/golangci-lint run
@golangci-lint run
.PHONY: lint
DESTINATION = ./index.html.md
@@ -231,15 +231,6 @@ build-docker: build-linux
rm -rf DOCKER/tendermint
.PHONY: build-docker
###############################################################################
### Mocks ###
###############################################################################
mockery:
go generate -run="./scripts/mockery_generate.sh" ./...
.PHONY: mockery
###############################################################################
### Local testnet using docker ###
###############################################################################

View File

@@ -8,7 +8,7 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest)
[![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint)
[![Go version](https://img.shields.io/badge/go-1.16-blue.svg)](https://github.com/moovweb/gvm)
[![Go version](https://img.shields.io/badge/go-1.15-blue.svg)](https://github.com/moovweb/gvm)
[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/vcExX9T)
[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE)
[![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint)
@@ -18,7 +18,8 @@ Or [Blockchain](<https://en.wikipedia.org/wiki/Blockchain_(database)>), for shor
|--------|--------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------|
| master | ![Tests](https://github.com/tendermint/tendermint/workflows/Tests/badge.svg?branch=master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) | ![Lint](https://github.com/tendermint/tendermint/workflows/Lint/badge.svg) |
Tendermint Core is a Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - and securely replicates it on many machines.
Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language -
and securely replicates it on many machines.
For protocol details, see [the specification](https://github.com/tendermint/spec).
@@ -29,7 +30,9 @@ see our recent paper, "[The latest gossip on BFT consensus](https://arxiv.org/ab
Please do not depend on master as your production branch. Use [releases](https://github.com/tendermint/tendermint/releases) instead.
Tendermint has been used in production in both private and public environments, most notably the blockchains of the Cosmos Network. We haven't released v1.0 yet since we are still making breaking changes to the protocol and the APIs.
Tendermint is being used in production in both private and public environments,
most notably the blockchains of the [Cosmos Network](https://cosmos.network/).
However, we are still making breaking changes to the protocol and the APIs and have not yet released v1.0.
See below for more details about [versioning](#versioning).
In any case, if you intend to run Tendermint in production, we're happy to help. You can
@@ -48,7 +51,7 @@ to notify you of vulnerabilities and fixes in Tendermint Core. You can subscribe
| Requirement | Notes |
|-------------|------------------|
| Go version | Go1.16 or higher |
| Go version | Go1.15 or higher |
## Documentation
@@ -82,12 +85,32 @@ and familiarize yourself with our
Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes.
According to SemVer, anything in the public API can change at any time before version 1.0.0
To provide some stability to users of 0.X.X versions of Tendermint, the MINOR version is used
to signal breaking changes across Tendermint's API. This API includes all
publicly exposed types, functions, and methods in non-internal Go packages as well as
the types and methods accessible via the Tendermint RPC interface.
To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used
to signal breaking changes across a subset of the total public API. This subset includes all
interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not
include the Go APIs.
Breaking changes to these public APIs will be documented in the CHANGELOG.
That said, breaking changes in the following packages will be documented in the
CHANGELOG even if they don't lead to MINOR version bumps:
- crypto
- config
- libs
- bits
- bytes
- json
- log
- math
- net
- os
- protoio
- rand
- sync
- strings
- service
- node
- rpc/client
- types
### Upgrades
@@ -144,4 +167,4 @@ If you'd like to work full-time on Tendermint Core, [we're hiring](https://inter
Funding for Tendermint Core development comes primarily from the [Interchain Foundation](https://interchain.io),
a Swiss non-profit. The Tendermint trademark is owned by [Tendermint Inc.](https://tendermint.com), the for-profit entity
that also maintains [tendermint.com](https://tendermint.com).

View File

@@ -7,55 +7,54 @@ Policy](https://tendermint.com/security), we operate a [bug
bounty](https://hackerone.com/tendermint).
See the policy for more details on submissions and rewards, and see "Example Vulnerabilities" (below) for examples of the kinds of bugs we're most interested in.
### Guidelines
We require that all researchers:
* Use the bug bounty to disclose all vulnerabilities, and avoid posting vulnerability information in public places, including Github Issues, Discord channels, and Telegram groups
* Make every effort to avoid privacy violations, degradation of user experience, disruption to production systems (including but not limited to the Cosmos Hub), and destruction of data
* Keep any information about vulnerabilities that you've discovered confidential between yourself and the Tendermint Core engineering team until the issue has been resolved and disclosed
* Avoid posting personally identifiable information, privately or publicly
If you follow these guidelines when reporting an issue to us, we commit to:
* Not pursue or support any legal action related to your research on this vulnerability
* Work with you to understand, resolve and ultimately disclose the issue in a timely fashion
## Disclosure Process
Tendermint Core uses the following disclosure process:
1. Once a security report is received, the Tendermint Core team works to verify the issue and confirm its severity level using CVSS.
2. The Tendermint Core team collaborates with the Gaia team to determine the vulnerability's potential impact on the Cosmos Hub.
3. Patches are prepared for eligible releases of Tendermint in private repositories. See “Supported Releases” below for more information on which releases are considered eligible.
4. If it is determined that a CVE-ID is required, we request a CVE through a CVE Numbering Authority.
5. We notify the community that a security release is coming, to give users time to prepare their systems for the update. Notifications can include forum posts, tweets, and emails to partners and validators, including emails sent to the [Tendermint Security Mailing List](https://berlin.us4.list-manage.com/subscribe?u=431b35421ff7edcc77df5df10&id=3fe93307bc).
6. 24 hours following this notification, the fixes are applied publicly and new releases are issued.
7. Cosmos SDK and Gaia update their Tendermint Core dependencies to use these releases, and then themselves issue new releases.
8. Once releases are available for Tendermint Core, Cosmos SDK and Gaia, we notify the community, again, through the same channels as above. We also publish a Security Advisory on Github and publish the CVE, as long as neither the Security Advisory nor the CVE include any information on how to exploit these vulnerabilities beyond what information is already available in the patch itself.
9. Once the community is notified, we will pay out any relevant bug bounties to submitters.
10. One week after the releases go out, we will publish a post with further details on the vulnerability as well as our response to it.
This process can take some time. Every effort will be made to handle the bug in as timely a manner as possible; however, it's important that we follow the process described above to ensure that disclosures are handled consistently and to keep Tendermint Core and its downstream dependent projects--including but not limited to Gaia and the Cosmos Hub--as secure as possible.
### Example Timeline
The following is an example timeline for the triage and response. The required roles and team members are described in parentheses after each task; however, multiple people can play each role and each person may play multiple roles.
#### 24+ Hours Before Release Time
#### > 24 Hours Before Release Time
1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Create patches in a private security repo, and ensure that PRs are open targeting all relevant release branches (TENDERMINT ENG, TENDERMINT LEAD)
4. Test fixes on a testnet (TENDERMINT ENG, COSMOS SDK ENG)
5. Write “Security Advisory” for forum (TENDERMINT LEAD)
1. Request CVE number (ADMIN)
2. Gather emails and other contact info for validators (COMMS LEAD)
3. Test fixes on a testnet (TENDERMINT ENG, COSMOS ENG)
4. Write “Security Advisory” for forum (TENDERMINT LEAD)
#### 24 Hours Before Release Time
1. Post “Security Advisory” pre-notification on forum (TENDERMINT LEAD)
2. Post Tweet linking to forum post (COMMS LEAD)
3. Announce security advisory/link to post in various other social channels (Telegram, Discord) (COMMS LEAD)
4. Send emails to validators or other users (PARTNERSHIPS LEAD)
#### Release Time
@@ -65,36 +64,36 @@ The following is an example timeline for the triage and response. The required r
4. Post “Security releases” on forum (TENDERMINT LEAD)
5. Post new Tweet linking to forum post (COMMS LEAD)
6. Remind everyone via social channels (Telegram, Discord) that the release is out (COMMS LEAD)
7. Send emails to validators or other users (COMMS LEAD)
8. Publish Security Advisory and CVE, if CVE has no sensitive information (ADMIN)
#### After Release Time
1. Write forum post with exploit details (TENDERMINT LEAD)
2. Approve pay-out on HackerOne for submitter (ADMIN)
#### 7 Days After Release Time
1. Publish CVE if it has not yet been published (ADMIN)
2. Publish forum post with exploit details (TENDERMINT ENG, TENDERMINT LEAD)
## Supported Releases
The Tendermint Core team commits to releasing security patch releases for both the latest minor release as well as for the major/minor release that the Cosmos Hub is running.
If you are running older versions of Tendermint Core, we encourage you to upgrade at your earliest opportunity so that you can receive security patches directly from the Tendermint repo. While you are welcome to backport security patches to older versions for your own use, we will not publish or promote these backports.
## Scope
The full scope of our bug bounty program is outlined on our [Hacker One program page](https://hackerone.com/tendermint). Please also note that, in the interest of the safety of our users and staff, a few things are explicitly excluded from scope:
* Any third-party services
* Findings from physical testing, such as office access
* Findings derived from social engineering (e.g., phishing)
## Example Vulnerabilities
The following is a list of examples of the kinds of vulnerabilities that we're most interested in. It is not exhaustive: there are other kinds of issues we may also be interested in!
### Specification
@@ -115,9 +114,6 @@ Assuming less than 1/3 of the voting power is Byzantine (malicious):
* A node halting (liveness failure)
* Syncing new and old nodes
Assuming more than 1/3 of the voting power is Byzantine:
* Attacks that go unpunished (unhandled evidence)
### Networking
@@ -143,7 +139,7 @@ Attacks may come through the P2P network or the RPC layer:
### Libraries
* Serialization
* Serialization (Amino)
* Reading/Writing files and databases
### Cryptography
@@ -154,5 +150,5 @@ Attacks may come through the P2P network or the RPC layer:
### Light Client
* Core verification
* Bisection/sequential algorithms

View File

@@ -2,31 +2,19 @@
This guide provides instructions for upgrading to specific versions of Tendermint Core.
## v0.35
## Unreleased
### ABCI Changes
* Added `AbciVersion` to `RequestInfo`. Applications should check that the ABCI version they expect is being used in order to avoid unimplemented changes errors.
* The method `SetOption` has been removed from the ABCI.Client interface. This feature was only used in early ABCI implementations.
* Messages are written to a byte stream using uint64 length delimiters instead of int64.
* When mempool `v1` is enabled, transactions broadcast via `sync` mode may return a successful
response with a transaction hash, indicating that the transaction was successfully inserted into
the mempool. While this is true for `v0`, the `v1` mempool reactor may at a later point in time
evict or even drop this transaction after a hash has been returned. Thus, the user or client must
query for that transaction to check whether it is still in the mempool or has been committed (see the sketch after this list).
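Since a hash returned by a `sync` broadcast is only provisional under the `v1` mempool, clients should follow up and confirm the transaction was eventually committed, re-broadcasting if it was evicted. A minimal sketch using the Go RPC client, assuming a locally running node (the address, the example transaction, and the retry policy are placeholders, not prescribed by this guide):
```go
package main

import (
	"context"
	"fmt"
	"time"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
	"github.com/tendermint/tendermint/types"
)

func main() {
	client, err := rpchttp.New("http://localhost:26657")
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	tx := types.Tx("key=value")
	// A zero Code only means the tx passed CheckTx and entered the mempool,
	// not that it will be committed.
	res, err := client.BroadcastTxSync(ctx, tx)
	if err != nil {
		panic(err)
	}
	if res.Code != 0 {
		panic(fmt.Sprintf("tx rejected by CheckTx: code %d, log: %s", res.Code, res.Log))
	}

	// Poll /tx with the returned hash; until it shows up the tx is either
	// still pending or has been evicted and needs to be re-broadcast.
	for i := 0; i < 10; i++ {
		if committed, err := client.Tx(ctx, res.Hash, false); err == nil {
			fmt.Printf("tx committed at height %d\n", committed.Height)
			return
		}
		time.Sleep(time.Second)
	}
	fmt.Println("tx not committed; it may have been evicted and need re-broadcasting")
}
```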
### Config Changes
* The configuration file field `[fastsync]` has been renamed to `[blocksync]`.
* The top level configuration file field `fast-sync` has moved under the new `[blocksync]`
field as `blocksync.enable`.
* `blocksync.version = "v1"` and `blocksync.version = "v2"` (previously `fastsync`)
are no longer supported; please use `v0` instead. During the v0.35 release cycle, `v0` was
found to meet the existing needs, and the cost of maintaining the `v1` and `v2` modules
was judged to outweigh the benefit. (A sample of the renamed `[blocksync]` section appears at the end of this section.)
* `fast_sync = "v1"` is no longer supported. Please use `v2` instead.
* All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case. Before restarting the node make sure
you have updated all the variables in your `config.toml` file.
@@ -34,43 +22,10 @@ This guide provides instructions for upgrading to specific versions of Tendermin
* Added a `--mode` flag and a `mode` config variable in `config.toml` for setting the node's mode: `full` | `validator` | `seed` (default: `full`)
[ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
* `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace
`Seeds`. Bootstrap peers are dialed on startup when needed for peer discovery. Unlike
persistent peers, there is no guarantee that the node will remain connected to these peers.
* configuration values starting with `priv-validator-` have moved to the new
`priv-validator` section, without the `priv-validator-` prefix.
* The fast sync process, as well as the blockchain package and service, have all
been renamed to block sync.
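For reference, a minimal sketch of what the renamed section might look like in `config.toml` after the upgrade (only the fields discussed above are shown; other settings keep their defaults):
```toml
[blocksync]

# Formerly the top-level `fast-sync` field.
enable = true

# Only "v0" is supported; "v1" and "v2" have been removed.
version = "v0"
```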
### Database Key Format Changes
The format of all Tendermint on-disk database keys changes in
0.35. Upgrading nodes must either re-sync all data or run a migration
script provided in this release. The script, located in
`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go`,
provides the function `Migrate(context.Context, db.DB)`, which you can
invoke in whatever way makes sense for your deployment.
For ease of use, the `tendermint` command includes a CLI version of the
migration script, which you can invoke as in:

    tendermint key-migrate
This reads the configuration file as normal and allows the
`--db-backend` and `--db-dir` flags to change database operations as
needed.
The migration operation is idempotent and can be run more than once,
if needed.
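The CLI above is the usual path, but the same migration can be driven from Go. A minimal sketch, assuming the `Migrate` signature quoted above; the database name, backend, and directory are placeholders, and each of the node's databases would need the same treatment:
```go
package main

import (
	"context"

	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/scripts/keymigrate"
)

func main() {
	// Open one of the node's existing databases (use whatever backend and
	// data directory your config.toml specifies).
	db, err := dbm.NewDB("blockstore", dbm.GoLevelDBBackend, "/path/to/data")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Migrate rewrites keys into the new order-preserving format; it is
	// idempotent, so re-running after an interrupted migration is safe.
	if err := keymigrate.Migrate(context.Background(), db); err != nil {
		panic(err)
	}
}
```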
### CLI Changes
* You must now specify the node mode (validator|full|seed) in `tendermint init [mode]`
* The `--fast-sync` command line option has been renamed to `--blocksync.enable`
* If you had previously used `tendermint gen_node_key` to generate a new node
key, keep in mind that it no longer saves the output to a file. You can use
`tendermint init validator` or pipe the output of `tendermint gen_node_key` to
@@ -82,118 +37,13 @@ if needed.
* CLI commands and flags are all now hyphen-case instead of snake_case.
Make sure to adjust any scripts that calls a cli command with snake_casing
### API Changes
The p2p layer was reimplemented as part of the 0.35 release cycle and
all reactors were refactored to accommodate the change. As part of that work these
implementations moved into the `internal` package and are no longer
considered part of the public Go API of tendermint. These packages
are:
- `p2p`
- `mempool`
- `consensus`
- `statesync`
- `blockchain`
- `evidence`
Accordingly, the `node` package was changed to reduce access to
tendermint internals: applications that use tendermint as a library
will need to change to accommodate these changes. Most notably:
- The `Node` type has become internal, and all constructors return a
`service.Service` implementation.
- The `node.DefaultNewNode` and `node.NewNode` constructors are no
longer exported and have been replaced with `node.New` and
`node.NewDefault` which provide more functional interfaces.
### gRPC Support
gRPC support in the RPC layer is deprecated and will be removed in 0.36.
### Peer Management Interface
When running with the new P2P Layer, the methods `UnsafeDialSeeds` and
`UnsafeDialPeers` RPC methods will always return an error. They are
deprecated and will be removed in 0.36 when the legacy peer stack is
removed.
Additionally, the format of the peer list returned by the `NetInfo`
method changes in this release to accommodate the different way that
the new stack tracks data about peers. This change affects users of
both stacks.
### Using the updated p2p library
The P2P library was reimplemented in this release. The new implementation is
enabled by default in this version of Tendermint. The legacy implementation is still
included in this version of Tendermint as a backstop to work around unforeseen
production issues. The new and legacy versions are interoperable. If necessary,
you can enable the legacy implementation in the server configuration file.
To make use of the legacy P2P implementation, add or update the following field of
your server's configuration file under the `[p2p]` section:
```toml
[p2p]
...
use-legacy = true
...
```
If you need to do this, please consider filing an issue in the Tendermint repository
to let us know why. We plan to remove the legacy P2P code in the next (v0.36) release.
#### New p2p queue types
The new p2p implementation enables selection of the queue type to be used for
passing messages between peers.
The following values may be used when selecting which queue type to use:
* `fifo`: (**default**) An unbuffered and lossless queue that passes messages through
in the order in which they were received.
* `priority`: A priority queue of messages.
* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A
weighted deficit round robin queue is created per peer. Each queue contains a
separate 'flow' for each of the channels of communication that exist between any two
peers. Tendermint maintains a channel per message type between peers. Each WDRR
queue maintains a shared buffered with a fixed capacity through which messages on different
flows are passed.
For more information on WDRR scheduling, see: https://en.wikipedia.org/wiki/Deficit_round_robin
To select a queue type, add or update the following field under the `[p2p]`
section of your server's configuration file.
```toml
[p2p]
...
queue-type = "wdrr"
...
```
### Support for Custom Reactor and Mempool Implementations
The changes to p2p layer removed existing support for custom
reactors. Based on our understanding of how this functionality was
used, the introduction of the prioritized mempool covers nearly all of
the use cases for custom reactors. If you are currently running custom
reactors and mempools and are having trouble seeing the migration path
for your project please feel free to reach out to the Tendermint Core
development team directly.
## v0.34.0
**Upgrading to Tendermint 0.34 requires a blockchain restart.**
This release is not compatible with previous blockchains due to changes to
the encoding format (see "Protocol Buffers," below) and the block header (see "Blockchain Protocol").
Note that Tendermint 0.34 also requires Go 1.16 or higher.
Note that Tendermint 0.34 also requires Go 1.15 or higher.
### ABCI Changes
@@ -340,8 +190,8 @@ Other user-relevant changes include:
* The old `lite` package was removed; the new light client uses the `light` package.
* The `Verifier` was broken up into two pieces:
* Core verification logic (pure `VerifyX` functions)
* `Client` object, which represents the complete light client
* The new light clients stores headers & validator sets as `LightBlock`s
* The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory.
@@ -473,12 +323,12 @@ Evidence Params has been changed to include duration.
### Go API
* `libs/common` has been removed in favor of specific pkgs.
* `async`
* `service`
* `rand`
* `net`
* `strings`
* `cmap`
* removal of `errors` pkg
### RPC Changes
@@ -547,9 +397,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like:
```go
abci.ResponseDeliverTx{
Tags: []kv.Pair{
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
}
}
```
@@ -568,14 +418,14 @@ the following `Events`:
```go
abci.ResponseDeliverTx{
Events: []abci.Event{
{
Type: "transfer",
Attributes: kv.Pairs{
{Key: []byte("sender"), Value: []byte("foo")},
{Key: []byte("recipient"), Value: []byte("bar")},
{Key: []byte("amount"), Value: []byte("35")},
},
}
}
```
@@ -623,9 +473,9 @@ In this case, the WS client will receive an error with description:
"jsonrpc": "2.0",
"id": "{ID}#event",
"error": {
"code": -32000,
"msg": "Server error",
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
"code": -32000,
"msg": "Server error",
"data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)"
}
}
@@ -831,9 +681,9 @@ just the `Data` field set:
```go
[]ProofOp{
ProofOp{
Data: <proof bytes>,
}
}
```

View File

@@ -6,8 +6,8 @@ import (
"sync"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
const (
@@ -15,7 +15,7 @@ const (
echoRetryIntervalSeconds = 1
)
//go:generate ../../scripts/mockery_generate.sh Client
//go:generate mockery --case underscore --name Client
// Client defines an interface for an ABCI client.
//

View File

@@ -10,9 +10,9 @@ import (
"google.golang.org/grpc"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
// A gRPC client.

View File

@@ -4,8 +4,8 @@ import (
"context"
types "github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
// NOTE: use defer to unlock mutex because Application might panic (e.g., in

View File

@@ -1,4 +1,4 @@
// Code generated by mockery. DO NOT EDIT.
// Code generated by mockery v2.5.1. DO NOT EDIT.
package mocks
@@ -796,8 +796,3 @@ func (_m *Client) String() string {
return r0
}
// Wait provides a mock function with given fields:
func (_m *Client) Wait() {
_m.Called()
}

View File

@@ -12,10 +12,10 @@ import (
"time"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/libs/timer"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/libs/timer"
)
const (

View File

@@ -6,14 +6,13 @@ import (
"testing"
"time"
"math/rand"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/server"
"github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
)
@@ -102,7 +101,7 @@ func TestHangingSyncCalls(t *testing.T) {
func setupClientServer(t *testing.T, app types.Application) (
service.Service, abcicli.Client) {
// some port between 20k and 30k
port := 20000 + rand.Int31()%10000
port := 20000 + tmrand.Int32()%10000
addr := fmt.Sprintf("localhost:%d", port)
s, err := server.NewServer(addr, "socket", app)

View File

@@ -17,6 +17,7 @@ import (
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/example/counter"
"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/abci/server"
servertest "github.com/tendermint/tendermint/abci/tests/server"
@@ -46,6 +47,9 @@ var (
flagHeight int
flagProve bool
// counter
flagSerial bool
// kvstore
flagPersist string
)
@@ -57,14 +61,19 @@ var RootCmd = &cobra.Command{
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
switch cmd.Use {
case "kvstore", "version":
case "counter", "kvstore": // for the examples apps, don't pre-run
return nil
case "version": // skip running for version command
return nil
}
if logger == nil {
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
allowLevel, err := log.AllowLevel(flagLogLevel)
if err != nil {
return err
}
logger = log.NewFilter(log.NewTMLogger(log.NewSyncWriter(os.Stdout)), allowLevel)
}
if client == nil {
var err error
client, err = abcicli.NewClient(flagAddress, flagAbci, false)
@@ -129,6 +138,10 @@ func addQueryFlags() {
"whether or not to return a merkle proof of the query result")
}
func addCounterFlags() {
counterCmd.PersistentFlags().BoolVarP(&flagSerial, "serial", "", false, "enforce incrementing (serial) transactions")
}
func addKVStoreFlags() {
kvstoreCmd.PersistentFlags().StringVarP(&flagPersist, "persist", "", "", "directory to use for a database")
}
@@ -147,6 +160,8 @@ func addCommands() {
RootCmd.AddCommand(queryCmd)
// examples
addCounterFlags()
RootCmd.AddCommand(counterCmd)
addKVStoreFlags()
RootCmd.AddCommand(kvstoreCmd)
}
@@ -246,6 +261,14 @@ var queryCmd = &cobra.Command{
RunE: cmdQuery,
}
var counterCmd = &cobra.Command{
Use: "counter",
Short: "ABCI demo example",
Long: "ABCI demo example",
Args: cobra.ExactArgs(0),
RunE: cmdCounter,
}
var kvstoreCmd = &cobra.Command{
Use: "kvstore",
Short: "ABCI demo example",
@@ -573,8 +596,34 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
return nil
}
func cmdCounter(cmd *cobra.Command, args []string) error {
app := counter.NewApplication(flagSerial)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Start the listener
srv, err := server.NewServer(flagAddress, flagAbci, app)
if err != nil {
return err
}
srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(); err != nil {
return err
}
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
if err := srv.Stop(); err != nil {
logger.Error("Error while stopping server", "err", err)
}
})
// Run forever.
select {}
}
func cmdKVStore(cmd *cobra.Command, args []string) error {
logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
// Create the application - in memory or persisted to disk
var app types.Application

View File

@@ -0,0 +1,86 @@
package counter
import (
"encoding/binary"
"fmt"
"github.com/tendermint/tendermint/abci/example/code"
"github.com/tendermint/tendermint/abci/types"
)
// Application is an in-memory counter ABCI application: txCount tracks the
// number of transactions delivered and hashCount the number of commits. When
// serial is true, each transaction must carry the next expected nonce encoded
// as a big-endian integer.
type Application struct {
	types.BaseApplication

	hashCount int
	txCount   int
	serial    bool
}

// NewApplication returns a counter app; with serial enabled, transactions
// must arrive in strictly increasing nonce order.
func NewApplication(serial bool) *Application {
	return &Application{serial: serial}
}
func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
}
// DeliverTx counts the transaction; in serial mode the tx bytes must decode
// (big-endian) to exactly the current txCount, i.e. transactions act as nonces.
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
if app.serial {
if len(req.Tx) > 8 {
return types.ResponseDeliverTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue != uint64(app.txCount) {
return types.ResponseDeliverTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
}
}
app.txCount++
return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}
func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
if app.serial {
if len(req.Tx) > 8 {
return types.ResponseCheckTx{
Code: code.CodeTypeEncodingError,
Log: fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
}
tx8 := make([]byte, 8)
copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
txValue := binary.BigEndian.Uint64(tx8)
if txValue < uint64(app.txCount) {
return types.ResponseCheckTx{
Code: code.CodeTypeBadNonce,
Log: fmt.Sprintf("Invalid nonce. Expected >= %v, got %v", app.txCount, txValue)}
}
}
return types.ResponseCheckTx{Code: code.CodeTypeOK}
}
func (app *Application) Commit() (resp types.ResponseCommit) {
app.hashCount++
if app.txCount == 0 {
return types.ResponseCommit{}
}
hash := make([]byte, 8)
binary.BigEndian.PutUint64(hash, uint64(app.txCount))
return types.ResponseCommit{Data: hash}
}
func (app *Application) Query(reqQuery types.RequestQuery) types.ResponseQuery {
switch reqQuery.Path {
case "hash":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.hashCount))}
case "tx":
return types.ResponseQuery{Value: []byte(fmt.Sprintf("%v", app.txCount))}
default:
return types.ResponseQuery{Log: fmt.Sprintf("Invalid query path. Expected hash or tx, got %v", reqQuery.Path)}
}
}

View File

@@ -1,8 +1,6 @@
package kvstore
import (
mrand "math/rand"
"github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
)
@@ -11,8 +9,7 @@ import (
// from the input value
func RandVal(i int) types.ValidatorUpdate {
pubkey := tmrand.Bytes(32)
// Random value between [0, 2^16 - 1]
power := mrand.Uint32() & (1<<16 - 1) // nolint:gosec // G404: Use of weak random number generator
power := tmrand.Uint16() + 1
v := types.UpdateValidator(pubkey, int64(power), "")
return v
}

View File

@@ -87,16 +87,15 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
// tx is either "key=value" or just arbitrary bytes
func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
var key, value string
var key, value []byte
parts := bytes.Split(req.Tx, []byte("="))
if len(parts) == 2 {
key, value = string(parts[0]), string(parts[1])
key, value = parts[0], parts[1]
} else {
key, value = string(req.Tx), string(req.Tx)
key, value = req.Tx, req.Tx
}
err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
err := app.state.db.Set(prefixKey(key), value)
if err != nil {
panic(err)
}
@@ -106,10 +105,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
{
Type: "app",
Attributes: []types.EventAttribute{
{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
{Key: "key", Value: key, Index: true},
{Key: "index_key", Value: "index is working", Index: true},
{Key: "noindex_key", Value: "index is working", Index: false},
{Key: []byte("creator"), Value: []byte("Cosmoshi Netowoko"), Index: true},
{Key: []byte("key"), Value: key, Index: true},
{Key: []byte("index_key"), Value: []byte("index is working"), Index: true},
{Key: []byte("noindex_key"), Value: []byte("index is working"), Index: false},
},
},
}

View File

@@ -9,10 +9,10 @@ import (
"runtime"
"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
tmlog "github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
)
// var maxNumberConnections = 2

View File

@@ -5,7 +5,6 @@ import (
"context"
"errors"
"fmt"
mrand "math/rand"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
@@ -19,8 +18,7 @@ func InitChain(client abcicli.Client) error {
vals := make([]types.ValidatorUpdate, total)
for i := 0; i < total; i++ {
pubkey := tmrand.Bytes(33)
// nolint:gosec // G404: Use of weak random number generator
power := mrand.Int()
power := tmrand.Int()
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
}
_, err := client.InitChainSync(ctx, types.RequestInitChain{

View File

@@ -0,0 +1,56 @@
package main
import (
"bytes"
"context"
"fmt"
"os"
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/libs/log"
)
var ctx = context.Background()
func startClient(abciType string) abcicli.Client {
// Start client
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", abciType, true)
if err != nil {
panic(err.Error())
}
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
client.SetLogger(logger.With("module", "abcicli"))
if err := client.Start(); err != nil {
panicf("connecting to abci_app: %v", err.Error())
}
return client
}
func commit(client abcicli.Client, hashExp []byte) {
res, err := client.CommitSync(ctx)
if err != nil {
panicf("client error: %v", err)
}
if !bytes.Equal(res.Data, hashExp) {
panicf("Commit hash was unexpected. Got %X expected %X", res.Data, hashExp)
}
}
func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
if err != nil {
panicf("client error: %v", err)
}
if res.Code != codeExp {
panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
}
if !bytes.Equal(res.Data, dataExp) {
panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
}
}
func panicf(format string, a ...interface{}) {
panic(fmt.Sprintf(format, a...))
}

View File

@@ -0,0 +1,93 @@
package main
import (
"fmt"
"log"
"os"
"os/exec"
"time"
"github.com/tendermint/tendermint/abci/types"
)
var abciType string
func init() {
abciType = os.Getenv("ABCI")
if abciType == "" {
abciType = "socket"
}
}
func main() {
testCounter()
}
const (
maxABCIConnectTries = 10
)
func ensureABCIIsUp(typ string, n int) error {
var err error
cmdString := "abci-cli echo hello"
if typ == "grpc" {
cmdString = "abci-cli --abci grpc echo hello"
}
for i := 0; i < n; i++ {
cmd := exec.Command("bash", "-c", cmdString)
_, err = cmd.CombinedOutput()
if err == nil {
break
}
time.Sleep(500 * time.Millisecond)
}
return err
}
func testCounter() {
abciApp := os.Getenv("ABCI_APP")
if abciApp == "" {
panic("No ABCI_APP specified")
}
fmt.Printf("Running %s test with abci=%s\n", abciApp, abciType)
subCommand := fmt.Sprintf("abci-cli %s", abciApp)
cmd := exec.Command("bash", "-c", subCommand)
cmd.Stdout = os.Stdout
if err := cmd.Start(); err != nil {
log.Fatalf("starting %q err: %v", abciApp, err)
}
defer func() {
if err := cmd.Process.Kill(); err != nil {
log.Printf("error on process kill: %v", err)
}
if err := cmd.Wait(); err != nil {
log.Printf("error while waiting for cmd to exit: %v", err)
}
}()
if err := ensureABCIIsUp(abciType, maxABCIConnectTries); err != nil {
log.Fatalf("echo failed: %v", err) //nolint:gocritic
}
client := startClient(abciType)
defer func() {
if err := client.Stop(); err != nil {
log.Printf("error trying client stop: %v", err)
}
}()
// commit(client, nil)
// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
commit(client, nil)
deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
}
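
The expected commit hashes in testCounter follow the example counter app's convention: the number of transactions processed so far, encoded as eight big-endian bytes. A minimal standalone sketch of that expectation (not part of the diff; the helper name is illustrative):

package main

import (
	"encoding/binary"
	"fmt"
)

// expectedCommitHash mirrors the assumed counter-app behaviour: the running
// transaction count is returned as an 8-byte big-endian value on Commit.
func expectedCommitHash(txCount uint64) []byte {
	hash := make([]byte, 8)
	binary.BigEndian.PutUint64(hash, txCount)
	return hash
}

func main() {
	fmt.Println(expectedCommitHash(1)) // [0 0 0 0 0 0 0 1]
	fmt.Println(expectedCommitHash(5)) // [0 0 0 0 0 0 0 5]
}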

28
abci/tests/test_app/test.sh Executable file
View File

@@ -0,0 +1,28 @@
#! /bin/bash
set -e
# These tests spawn the counter app and server by exec'ing the ABCI_APP command and run some simple client tests against it.
# Get the directory where this script is located.
export PATH="$GOBIN:$PATH"
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done
DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )"
# Change into that dir because we expect that.
cd "$DIR"
echo "RUN COUNTER OVER SOCKET"
# test golang counter
ABCI_APP="counter" go run -mod=readonly ./*.go
echo "----------------------"
echo "RUN COUNTER OVER GRPC"
# test golang counter via grpc
ABCI_APP="counter --abci=grpc" ABCI="grpc" go run -mod=readonly ./*.go
echo "----------------------"
# test nodejs counter
# TODO: fix node app
#ABCI_APP="node $GOPATH/src/github.com/tendermint/js-abci/example/app.js" go test -test.run TestCounter

View File

@@ -37,6 +37,7 @@ function testExample() {
}
testExample 1 tests/test_cli/ex1.abci abci-cli kvstore
testExample 2 tests/test_cli/ex2.abci abci-cli counter
echo ""
echo "PASS"

View File

@@ -4,7 +4,7 @@ import (
"io"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/libs/protoio"
"github.com/tendermint/tendermint/libs/protoio"
)
const (
@@ -15,7 +15,11 @@ const (
func WriteMessage(msg proto.Message, w io.Writer) error {
protoWriter := protoio.NewDelimitedWriter(w)
_, err := protoWriter.WriteMsg(msg)
return err
if err != nil {
return err
}
return nil
}
// ReadMessage reads a varint length-delimited protobuf message.
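
For context, a minimal round-trip sketch using the exported helpers from this file. It assumes ReadMessage mirrors WriteMessage with an (io.Reader, proto.Message) signature, as its comment above suggests; treat it as a sketch rather than the library's documented example:

package main

import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	buf := new(bytes.Buffer)

	// Write a varint length-delimited RequestEcho into the buffer.
	if err := types.WriteMessage(&types.RequestEcho{Message: "hello"}, buf); err != nil {
		panic(err)
	}

	// Read it back into a fresh message of the same concrete type.
	var echo types.RequestEcho
	if err := types.ReadMessage(buf, &echo); err != nil {
		panic(err)
	}
	fmt.Println(echo.Message) // "hello"
}

Length-delimiting is what lets several messages share a single stream, which is how the socket ABCI connection frames its requests and responses.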

View File

@@ -25,7 +25,7 @@ func TestMarshalJSON(t *testing.T) {
{
Type: "testEvent",
Attributes: []EventAttribute{
{Key: "pho", Value: "bo"},
{Key: []byte("pho"), Value: []byte("bo")},
},
},
},
@@ -92,7 +92,7 @@ func TestWriteReadMessage2(t *testing.T) {
{
Type: "testEvent",
Attributes: []EventAttribute{
{Key: "abc", Value: "def"},
{Key: []byte("abc"), Value: []byte("def")},
},
},
},
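
A brief sketch of constructing an event with the []byte attribute form used in this test (illustrative values only, not part of the diff):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// With this form, attribute keys and values are raw bytes, so callers
	// convert from strings explicitly.
	ev := types.Event{
		Type: "testEvent",
		Attributes: []types.EventAttribute{
			{Key: []byte("pho"), Value: []byte("bo"), Index: true},
		},
	}
	fmt.Println(ev.Type, string(ev.Attributes[0].Key), string(ev.Attributes[0].Value))
}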

View File

@@ -6,7 +6,6 @@ import (
"github.com/tendermint/tendermint/crypto/ed25519"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
"github.com/tendermint/tendermint/crypto/secp256k1"
"github.com/tendermint/tendermint/crypto/sr25519"
)
func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
@@ -18,6 +17,7 @@ func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate {
}
return ValidatorUpdate{
// Address:
PubKey: pkp,
Power: power,
}
@@ -34,16 +34,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate {
panic(err)
}
return ValidatorUpdate{
PubKey: pkp,
Power: power,
}
case sr25519.KeyType:
pke := sr25519.PubKey(pk)
pkp, err := cryptoenc.PubKeyToProto(pke)
if err != nil {
panic(err)
}
return ValidatorUpdate{
// Address:
PubKey: pkp,
Power: power,
}
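
As a usage sketch for the constructor above, assuming an empty keyType still selects the ed25519 branch of the switch (the power value of 10 is arbitrary):

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	// Generate a fresh ed25519 key and wrap it in a ValidatorUpdate with
	// voting power 10; the empty keyType falls through to ed25519.
	pub := ed25519.GenPrivKey().PubKey()
	val := types.UpdateValidator(pub.Bytes(), 10, "")
	fmt.Printf("power=%d pubkey=%X\n", val.Power, pub.Bytes())
}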

View File

@@ -1835,11 +1835,6 @@ type ResponseCheckTx struct {
GasUsed int64 `protobuf:"varint,6,opt,name=gas_used,proto3" json:"gas_used,omitempty"`
Events []Event `protobuf:"bytes,7,rep,name=events,proto3" json:"events,omitempty"`
Codespace string `protobuf:"bytes,8,opt,name=codespace,proto3" json:"codespace,omitempty"`
Sender string `protobuf:"bytes,9,opt,name=sender,proto3" json:"sender,omitempty"`
Priority int64 `protobuf:"varint,10,opt,name=priority,proto3" json:"priority,omitempty"`
// mempool_error is set by Tendermint.
// ABCI applications creating a ResponseCheckTx should not set mempool_error.
MempoolError string `protobuf:"bytes,11,opt,name=mempool_error,json=mempoolError,proto3" json:"mempool_error,omitempty"`
}
func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} }
@@ -1931,27 +1926,6 @@ func (m *ResponseCheckTx) GetCodespace() string {
return ""
}
func (m *ResponseCheckTx) GetSender() string {
if m != nil {
return m.Sender
}
return ""
}
func (m *ResponseCheckTx) GetPriority() int64 {
if m != nil {
return m.Priority
}
return 0
}
func (m *ResponseCheckTx) GetMempoolError() string {
if m != nil {
return m.MempoolError
}
return ""
}
type ResponseDeliverTx struct {
Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"`
Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
@@ -2466,8 +2440,8 @@ func (m *Event) GetAttributes() []EventAttribute {
// EventAttribute is a single key-value pair, associated with an event.
type EventAttribute struct {
Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
Index bool `protobuf:"varint,3,opt,name=index,proto3" json:"index,omitempty"`
}
@@ -2504,18 +2478,18 @@ func (m *EventAttribute) XXX_DiscardUnknown() {
var xxx_messageInfo_EventAttribute proto.InternalMessageInfo
func (m *EventAttribute) GetKey() string {
func (m *EventAttribute) GetKey() []byte {
if m != nil {
return m.Key
}
return ""
return nil
}
func (m *EventAttribute) GetValue() string {
func (m *EventAttribute) GetValue() []byte {
if m != nil {
return m.Value
}
return ""
return nil
}
func (m *EventAttribute) GetIndex() bool {
@@ -2964,172 +2938,169 @@ func init() {
func init() { proto.RegisterFile("tendermint/abci/types.proto", fileDescriptor_252557cfdd89a31a) }
var fileDescriptor_252557cfdd89a31a = []byte{
// 2627 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0xcd, 0x73, 0xdb, 0xc6,
0x15, 0xe7, 0x37, 0x89, 0x47, 0x91, 0xa2, 0xd6, 0x8a, 0x43, 0x33, 0xb6, 0xe4, 0xc0, 0xe3, 0x34,
0x76, 0x12, 0xa9, 0x91, 0xc7, 0xae, 0x33, 0xe9, 0x47, 0x44, 0x9a, 0x2e, 0x15, 0xab, 0x92, 0xba,
0xa2, 0x9d, 0x49, 0xdb, 0x18, 0x01, 0x89, 0x15, 0x89, 0x98, 0x04, 0x10, 0x60, 0x29, 0x4b, 0x39,
0x76, 0xda, 0x8b, 0xa7, 0x07, 0x1f, 0x7b, 0xc9, 0x4c, 0xff, 0x83, 0x5e, 0x7b, 0xea, 0xa9, 0x87,
0x1c, 0xda, 0x99, 0x1c, 0x7b, 0xe8, 0xa4, 0x1d, 0xfb, 0xd6, 0x7f, 0xa0, 0xa7, 0xce, 0x74, 0xf6,
0x03, 0x20, 0x40, 0x12, 0x22, 0xd5, 0xf4, 0xd6, 0xdb, 0xee, 0xc3, 0x7b, 0x8f, 0xbb, 0x6f, 0xf7,
0xfd, 0xf6, 0xb7, 0x6f, 0x09, 0xaf, 0x51, 0x62, 0x19, 0xc4, 0x1d, 0x9a, 0x16, 0xdd, 0xd4, 0x3b,
0x5d, 0x73, 0x93, 0x9e, 0x3a, 0xc4, 0xdb, 0x70, 0x5c, 0x9b, 0xda, 0x68, 0x79, 0xfc, 0x71, 0x83,
0x7d, 0xac, 0x5d, 0x09, 0x69, 0x77, 0xdd, 0x53, 0x87, 0xda, 0x9b, 0x8e, 0x6b, 0xdb, 0x47, 0x42,
0xbf, 0x76, 0x39, 0xf4, 0x99, 0xfb, 0x09, 0x7b, 0x8b, 0x7c, 0x95, 0xc6, 0x4f, 0xc8, 0xa9, 0xff,
0xf5, 0xca, 0x94, 0xad, 0xa3, 0xbb, 0xfa, 0xd0, 0xff, 0xbc, 0xde, 0xb3, 0xed, 0xde, 0x80, 0x6c,
0xf2, 0x5e, 0x67, 0x74, 0xb4, 0x49, 0xcd, 0x21, 0xf1, 0xa8, 0x3e, 0x74, 0xa4, 0xc2, 0x6a, 0xcf,
0xee, 0xd9, 0xbc, 0xb9, 0xc9, 0x5a, 0x42, 0xaa, 0xfe, 0x25, 0x0f, 0x79, 0x4c, 0x3e, 0x1f, 0x11,
0x8f, 0xa2, 0x2d, 0xc8, 0x90, 0x6e, 0xdf, 0xae, 0x26, 0xaf, 0x26, 0xdf, 0x2c, 0x6e, 0x5d, 0xde,
0x98, 0x98, 0xdc, 0x86, 0xd4, 0x6b, 0x76, 0xfb, 0x76, 0x2b, 0x81, 0xb9, 0x2e, 0xba, 0x0d, 0xd9,
0xa3, 0xc1, 0xc8, 0xeb, 0x57, 0x53, 0xdc, 0xe8, 0x4a, 0x9c, 0xd1, 0x7d, 0xa6, 0xd4, 0x4a, 0x60,
0xa1, 0xcd, 0x7e, 0xca, 0xb4, 0x8e, 0xec, 0x6a, 0xfa, 0xec, 0x9f, 0xda, 0xb1, 0x8e, 0xf8, 0x4f,
0x31, 0x5d, 0x54, 0x07, 0x30, 0x2d, 0x93, 0x6a, 0xdd, 0xbe, 0x6e, 0x5a, 0xd5, 0x0c, 0xb7, 0x7c,
0x3d, 0xde, 0xd2, 0xa4, 0x0d, 0xa6, 0xd8, 0x4a, 0x60, 0xc5, 0xf4, 0x3b, 0x6c, 0xb8, 0x9f, 0x8f,
0x88, 0x7b, 0x5a, 0xcd, 0x9e, 0x3d, 0xdc, 0x9f, 0x32, 0x25, 0x36, 0x5c, 0xae, 0x8d, 0x9a, 0x50,
0xec, 0x90, 0x9e, 0x69, 0x69, 0x9d, 0x81, 0xdd, 0x7d, 0x52, 0xcd, 0x71, 0x63, 0x35, 0xce, 0xb8,
0xce, 0x54, 0xeb, 0x4c, 0xb3, 0x95, 0xc0, 0xd0, 0x09, 0x7a, 0xe8, 0xfb, 0x50, 0xe8, 0xf6, 0x49,
0xf7, 0x89, 0x46, 0x4f, 0xaa, 0x79, 0xee, 0x63, 0x3d, 0xce, 0x47, 0x83, 0xe9, 0xb5, 0x4f, 0x5a,
0x09, 0x9c, 0xef, 0x8a, 0x26, 0x9b, 0xbf, 0x41, 0x06, 0xe6, 0x31, 0x71, 0x99, 0x7d, 0xe1, 0xec,
0xf9, 0xdf, 0x13, 0x9a, 0xdc, 0x83, 0x62, 0xf8, 0x1d, 0xf4, 0x23, 0x50, 0x88, 0x65, 0xc8, 0x69,
0x28, 0xdc, 0xc5, 0xd5, 0xd8, 0x75, 0xb6, 0x0c, 0x7f, 0x12, 0x05, 0x22, 0xdb, 0xe8, 0x2e, 0xe4,
0xba, 0xf6, 0x70, 0x68, 0xd2, 0x2a, 0x70, 0xeb, 0xb5, 0xd8, 0x09, 0x70, 0xad, 0x56, 0x02, 0x4b,
0x7d, 0xb4, 0x07, 0xe5, 0x81, 0xe9, 0x51, 0xcd, 0xb3, 0x74, 0xc7, 0xeb, 0xdb, 0xd4, 0xab, 0x16,
0xb9, 0x87, 0xeb, 0x71, 0x1e, 0x76, 0x4d, 0x8f, 0x1e, 0xfa, 0xca, 0xad, 0x04, 0x2e, 0x0d, 0xc2,
0x02, 0xe6, 0xcf, 0x3e, 0x3a, 0x22, 0x6e, 0xe0, 0xb0, 0xba, 0x74, 0xb6, 0xbf, 0x7d, 0xa6, 0xed,
0xdb, 0x33, 0x7f, 0x76, 0x58, 0x80, 0x7e, 0x0e, 0x17, 0x06, 0xb6, 0x6e, 0x04, 0xee, 0xb4, 0x6e,
0x7f, 0x64, 0x3d, 0xa9, 0x96, 0xb8, 0xd3, 0x1b, 0xb1, 0x83, 0xb4, 0x75, 0xc3, 0x77, 0xd1, 0x60,
0x06, 0xad, 0x04, 0x5e, 0x19, 0x4c, 0x0a, 0xd1, 0x63, 0x58, 0xd5, 0x1d, 0x67, 0x70, 0x3a, 0xe9,
0xbd, 0xcc, 0xbd, 0xdf, 0x8c, 0xf3, 0xbe, 0xcd, 0x6c, 0x26, 0xdd, 0x23, 0x7d, 0x4a, 0x5a, 0xcf,
0x43, 0xf6, 0x58, 0x1f, 0x8c, 0x88, 0xfa, 0x1d, 0x28, 0x86, 0xd2, 0x14, 0x55, 0x21, 0x3f, 0x24,
0x9e, 0xa7, 0xf7, 0x08, 0xcf, 0x6a, 0x05, 0xfb, 0x5d, 0xb5, 0x0c, 0x4b, 0xe1, 0xd4, 0x54, 0x9f,
0x27, 0x03, 0x4b, 0x96, 0x75, 0xcc, 0xf2, 0x98, 0xb8, 0x9e, 0x69, 0x5b, 0xbe, 0xa5, 0xec, 0xa2,
0x6b, 0x50, 0xe2, 0xfb, 0x47, 0xf3, 0xbf, 0xb3, 0xd4, 0xcf, 0xe0, 0x25, 0x2e, 0x7c, 0x24, 0x95,
0xd6, 0xa1, 0xe8, 0x6c, 0x39, 0x81, 0x4a, 0x9a, 0xab, 0x80, 0xb3, 0xe5, 0xf8, 0x0a, 0xaf, 0xc3,
0x12, 0x9b, 0x69, 0xa0, 0x91, 0xe1, 0x3f, 0x52, 0x64, 0x32, 0xa9, 0xa2, 0xfe, 0x39, 0x05, 0x95,
0xc9, 0x74, 0x46, 0x77, 0x21, 0xc3, 0x90, 0x4d, 0x82, 0x54, 0x6d, 0x43, 0xc0, 0xde, 0x86, 0x0f,
0x7b, 0x1b, 0x6d, 0x1f, 0xf6, 0xea, 0x85, 0xaf, 0xbe, 0x59, 0x4f, 0x3c, 0xff, 0xfb, 0x7a, 0x12,
0x73, 0x0b, 0x74, 0x89, 0x65, 0x9f, 0x6e, 0x5a, 0x9a, 0x69, 0xf0, 0x21, 0x2b, 0x2c, 0xb5, 0x74,
0xd3, 0xda, 0x31, 0xd0, 0x2e, 0x54, 0xba, 0xb6, 0xe5, 0x11, 0xcb, 0x1b, 0x79, 0x9a, 0x80, 0x55,
0x09, 0x4d, 0x91, 0x04, 0x13, 0x60, 0xdd, 0xf0, 0x35, 0x0f, 0xb8, 0x22, 0x5e, 0xee, 0x46, 0x05,
0xe8, 0x3e, 0xc0, 0xb1, 0x3e, 0x30, 0x0d, 0x9d, 0xda, 0xae, 0x57, 0xcd, 0x5c, 0x4d, 0xcf, 0xcc,
0xb2, 0x47, 0xbe, 0xca, 0x43, 0xc7, 0xd0, 0x29, 0xa9, 0x67, 0xd8, 0x70, 0x71, 0xc8, 0x12, 0xbd,
0x01, 0xcb, 0xba, 0xe3, 0x68, 0x1e, 0xd5, 0x29, 0xd1, 0x3a, 0xa7, 0x94, 0x78, 0x1c, 0xb6, 0x96,
0x70, 0x49, 0x77, 0x9c, 0x43, 0x26, 0xad, 0x33, 0x21, 0xba, 0x0e, 0x65, 0x86, 0x70, 0xa6, 0x3e,
0xd0, 0xfa, 0xc4, 0xec, 0xf5, 0x29, 0x07, 0xa8, 0x34, 0x2e, 0x49, 0x69, 0x8b, 0x0b, 0x55, 0x23,
0x58, 0x71, 0x8e, 0x6e, 0x08, 0x41, 0xc6, 0xd0, 0xa9, 0xce, 0x23, 0xb9, 0x84, 0x79, 0x9b, 0xc9,
0x1c, 0x9d, 0xf6, 0x65, 0x7c, 0x78, 0x1b, 0x5d, 0x84, 0x9c, 0x74, 0x9b, 0xe6, 0x6e, 0x65, 0x0f,
0xad, 0x42, 0xd6, 0x71, 0xed, 0x63, 0xc2, 0x97, 0xae, 0x80, 0x45, 0x47, 0xfd, 0x55, 0x0a, 0x56,
0xa6, 0x70, 0x90, 0xf9, 0xed, 0xeb, 0x5e, 0xdf, 0xff, 0x2d, 0xd6, 0x46, 0x77, 0x98, 0x5f, 0xdd,
0x20, 0xae, 0x3c, 0x3b, 0xaa, 0xd3, 0xa1, 0x6e, 0xf1, 0xef, 0x32, 0x34, 0x52, 0x1b, 0xed, 0x43,
0x65, 0xa0, 0x7b, 0x54, 0x13, 0xb8, 0xa2, 0x85, 0xce, 0x91, 0x69, 0x34, 0xdd, 0xd5, 0x7d, 0x24,
0x62, 0x9b, 0x5a, 0x3a, 0x2a, 0x0f, 0x22, 0x52, 0x84, 0x61, 0xb5, 0x73, 0xfa, 0x85, 0x6e, 0x51,
0xd3, 0x22, 0xda, 0xd4, 0xca, 0x5d, 0x9a, 0x72, 0xda, 0x3c, 0x36, 0x0d, 0x62, 0x75, 0xfd, 0x25,
0xbb, 0x10, 0x18, 0x07, 0x4b, 0xea, 0xa9, 0x18, 0xca, 0x51, 0x24, 0x47, 0x65, 0x48, 0xd1, 0x13,
0x19, 0x80, 0x14, 0x3d, 0x41, 0xdf, 0x85, 0x0c, 0x9b, 0x24, 0x9f, 0x7c, 0x79, 0xc6, 0x11, 0x28,
0xed, 0xda, 0xa7, 0x0e, 0xc1, 0x5c, 0x53, 0x55, 0x83, 0x74, 0x08, 0xd0, 0x7d, 0xd2, 0xab, 0x7a,
0x03, 0x96, 0x27, 0xe0, 0x3b, 0xb4, 0x7e, 0xc9, 0xf0, 0xfa, 0xa9, 0xcb, 0x50, 0x8a, 0x60, 0xb5,
0x7a, 0x11, 0x56, 0x67, 0x41, 0xaf, 0xda, 0x0f, 0xe4, 0x11, 0x08, 0x45, 0xb7, 0xa1, 0x10, 0x60,
0xaf, 0x48, 0xc7, 0xe9, 0x58, 0xf9, 0xca, 0x38, 0x50, 0x65, 0x79, 0xc8, 0xb6, 0x35, 0xdf, 0x0f,
0x29, 0x3e, 0xf0, 0xbc, 0xee, 0x38, 0x2d, 0xdd, 0xeb, 0xab, 0x9f, 0x42, 0x35, 0x0e, 0x57, 0x27,
0xa6, 0x91, 0x09, 0xb6, 0xe1, 0x45, 0xc8, 0x1d, 0xd9, 0xee, 0x50, 0xa7, 0xdc, 0x59, 0x09, 0xcb,
0x1e, 0xdb, 0x9e, 0x02, 0x63, 0xd3, 0x5c, 0x2c, 0x3a, 0xaa, 0x06, 0x97, 0x62, 0xb1, 0x95, 0x99,
0x98, 0x96, 0x41, 0x44, 0x3c, 0x4b, 0x58, 0x74, 0xc6, 0x8e, 0xc4, 0x60, 0x45, 0x87, 0xfd, 0xac,
0xc7, 0xe7, 0xca, 0xfd, 0x2b, 0x58, 0xf6, 0xd4, 0xdf, 0x15, 0xa0, 0x80, 0x89, 0xe7, 0x30, 0x4c,
0x40, 0x75, 0x50, 0xc8, 0x49, 0x97, 0x38, 0xd4, 0x87, 0xd1, 0xd9, 0xac, 0x41, 0x68, 0x37, 0x7d,
0x4d, 0x76, 0x64, 0x07, 0x66, 0xe8, 0x96, 0x64, 0x65, 0xf1, 0x04, 0x4b, 0x9a, 0x87, 0x69, 0xd9,
0x1d, 0x9f, 0x96, 0xa5, 0x63, 0x4f, 0x69, 0x61, 0x35, 0xc1, 0xcb, 0x6e, 0x49, 0x5e, 0x96, 0x99,
0xf3, 0x63, 0x11, 0x62, 0xd6, 0x88, 0x10, 0xb3, 0xec, 0x9c, 0x69, 0xc6, 0x30, 0xb3, 0x3b, 0x3e,
0x33, 0xcb, 0xcd, 0x19, 0xf1, 0x04, 0x35, 0xbb, 0x1f, 0xa5, 0x66, 0x82, 0x56, 0x5d, 0x8b, 0xb5,
0x8e, 0xe5, 0x66, 0x3f, 0x08, 0x71, 0xb3, 0x42, 0x2c, 0x31, 0x12, 0x4e, 0x66, 0x90, 0xb3, 0x46,
0x84, 0x9c, 0x29, 0x73, 0x62, 0x10, 0xc3, 0xce, 0x3e, 0x08, 0xb3, 0x33, 0x88, 0x25, 0x78, 0x72,
0xbd, 0x67, 0xd1, 0xb3, 0xf7, 0x02, 0x7a, 0x56, 0x8c, 0xe5, 0x97, 0x72, 0x0e, 0x93, 0xfc, 0x6c,
0x7f, 0x8a, 0x9f, 0x09, 0x3e, 0xf5, 0x46, 0xac, 0x8b, 0x39, 0x04, 0x6d, 0x7f, 0x8a, 0xa0, 0x95,
0xe6, 0x38, 0x9c, 0xc3, 0xd0, 0x7e, 0x31, 0x9b, 0xa1, 0xc5, 0x73, 0x28, 0x39, 0xcc, 0xc5, 0x28,
0x9a, 0x16, 0x43, 0xd1, 0x96, 0xb9, 0xfb, 0xb7, 0x62, 0xdd, 0x9f, 0x9f, 0xa3, 0xdd, 0x60, 0x27,
0xe4, 0x44, 0xce, 0x33, 0x94, 0x21, 0xae, 0x6b, 0xbb, 0x92, 0x6d, 0x89, 0x8e, 0xfa, 0x26, 0x3b,
0xb3, 0xc7, 0xf9, 0x7d, 0x06, 0x9f, 0xe3, 0x68, 0x1e, 0xca, 0x69, 0xf5, 0x0f, 0xc9, 0xb1, 0x2d,
0x3f, 0xe6, 0xc2, 0xe7, 0xbd, 0x22, 0xcf, 0xfb, 0x10, 0xcb, 0x4b, 0x45, 0x59, 0xde, 0x3a, 0x14,
0x19, 0x4a, 0x4f, 0x10, 0x38, 0xdd, 0x09, 0x08, 0xdc, 0x4d, 0x58, 0xe1, 0xc7, 0xb0, 0xe0, 0x82,
0x12, 0x9a, 0x33, 0xfc, 0x84, 0x59, 0x66, 0x1f, 0xc4, 0xe6, 0x14, 0x18, 0xfd, 0x0e, 0x5c, 0x08,
0xe9, 0x06, 0xe8, 0x2f, 0xd8, 0x4c, 0x25, 0xd0, 0xde, 0x96, 0xc7, 0xc0, 0x9f, 0x92, 0xe3, 0x08,
0x8d, 0x99, 0xdf, 0x2c, 0x92, 0x96, 0xfc, 0x1f, 0x91, 0xb4, 0xd4, 0x7f, 0x4d, 0xd2, 0xc2, 0xa7,
0x59, 0x3a, 0x7a, 0x9a, 0xfd, 0x2b, 0x39, 0x5e, 0x93, 0x80, 0x72, 0x75, 0x6d, 0x83, 0xc8, 0xf3,
0x85, 0xb7, 0x51, 0x05, 0xd2, 0x03, 0xbb, 0x27, 0x4f, 0x11, 0xd6, 0x64, 0x5a, 0x01, 0x08, 0x2b,
0x12, 0x63, 0x83, 0xa3, 0x29, 0xcb, 0x23, 0x2c, 0x8f, 0xa6, 0x0a, 0xa4, 0x9f, 0x10, 0x01, 0x99,
0x4b, 0x98, 0x35, 0x99, 0x1e, 0xdf, 0x64, 0x1c, 0x08, 0x97, 0xb0, 0xe8, 0xa0, 0xbb, 0xa0, 0xf0,
0x32, 0x84, 0x66, 0x3b, 0x9e, 0x44, 0xb7, 0xd7, 0xc2, 0x73, 0x15, 0xd5, 0x86, 0x8d, 0x03, 0xa6,
0xb3, 0xef, 0x78, 0xb8, 0xe0, 0xc8, 0x56, 0xe8, 0xd4, 0x55, 0x22, 0xe4, 0xef, 0x32, 0x28, 0x6c,
0xf4, 0x9e, 0xa3, 0x77, 0x09, 0x87, 0x2a, 0x05, 0x8f, 0x05, 0xea, 0x63, 0x40, 0xd3, 0x80, 0x8b,
0x5a, 0x90, 0x23, 0xc7, 0xc4, 0xa2, 0x6c, 0xd9, 0x58, 0xb8, 0x2f, 0xce, 0x60, 0x56, 0xc4, 0xa2,
0xf5, 0x2a, 0x0b, 0xf2, 0x3f, 0xbf, 0x59, 0xaf, 0x08, 0xed, 0xb7, 0xed, 0xa1, 0x49, 0xc9, 0xd0,
0xa1, 0xa7, 0x58, 0xda, 0xab, 0x7f, 0x4b, 0x31, 0x9a, 0x13, 0x01, 0xe3, 0x99, 0xb1, 0xf5, 0xb7,
0x7c, 0x2a, 0x44, 0x71, 0x17, 0x8b, 0xf7, 0x1a, 0x40, 0x4f, 0xf7, 0xb4, 0xa7, 0xba, 0x45, 0x89,
0x21, 0x83, 0x1e, 0x92, 0xa0, 0x1a, 0x14, 0x58, 0x6f, 0xe4, 0x11, 0x43, 0xb2, 0xed, 0xa0, 0x1f,
0x9a, 0x67, 0xfe, 0xdb, 0xcd, 0x33, 0x1a, 0xe5, 0xc2, 0x44, 0x94, 0x43, 0x14, 0x44, 0x09, 0x53,
0x10, 0x36, 0x36, 0xc7, 0x35, 0x6d, 0xd7, 0xa4, 0xa7, 0x7c, 0x69, 0xd2, 0x38, 0xe8, 0xb3, 0xcb,
0xdb, 0x90, 0x0c, 0x1d, 0xdb, 0x1e, 0x68, 0x02, 0x6e, 0x8a, 0xdc, 0x74, 0x49, 0x0a, 0x9b, 0x1c,
0x75, 0x7e, 0x9d, 0x1a, 0xe7, 0xdf, 0x98, 0x6a, 0xfe, 0xdf, 0x05, 0x58, 0xfd, 0x0d, 0xbf, 0x80,
0x46, 0x8f, 0x5b, 0x74, 0x08, 0x2b, 0x41, 0xfa, 0x6b, 0x23, 0x0e, 0x0b, 0xfe, 0x86, 0x5e, 0x14,
0x3f, 0x2a, 0xc7, 0x51, 0xb1, 0x87, 0x3e, 0x86, 0x57, 0x27, 0xb0, 0x2d, 0x70, 0x9d, 0x5a, 0x14,
0xe2, 0x5e, 0x89, 0x42, 0x9c, 0xef, 0x7a, 0x1c, 0xac, 0xf4, 0xb7, 0xcc, 0xba, 0x1d, 0x76, 0xa7,
0x09, 0xb3, 0x87, 0x99, 0xcb, 0x7f, 0x0d, 0x4a, 0x2e, 0xa1, 0xec, 0x9e, 0x1d, 0xb9, 0x35, 0x2e,
0x09, 0xa1, 0xbc, 0x8b, 0x1e, 0xc0, 0x2b, 0x33, 0x59, 0x04, 0xfa, 0x1e, 0x28, 0x63, 0x02, 0x92,
0x8c, 0xb9, 0x80, 0x05, 0x97, 0x8a, 0xb1, 0xae, 0xfa, 0xc7, 0xe4, 0xd8, 0x65, 0xf4, 0x9a, 0xd2,
0x84, 0x9c, 0x4b, 0xbc, 0xd1, 0x40, 0x5c, 0x1c, 0xca, 0x5b, 0xef, 0x2c, 0xc6, 0x3f, 0x98, 0x74,
0x34, 0xa0, 0x58, 0x1a, 0xab, 0x8f, 0x21, 0x27, 0x24, 0xa8, 0x08, 0xf9, 0x87, 0x7b, 0x0f, 0xf6,
0xf6, 0x3f, 0xda, 0xab, 0x24, 0x10, 0x40, 0x6e, 0xbb, 0xd1, 0x68, 0x1e, 0xb4, 0x2b, 0x49, 0xa4,
0x40, 0x76, 0xbb, 0xbe, 0x8f, 0xdb, 0x95, 0x14, 0x13, 0xe3, 0xe6, 0x87, 0xcd, 0x46, 0xbb, 0x92,
0x46, 0x2b, 0x50, 0x12, 0x6d, 0xed, 0xfe, 0x3e, 0xfe, 0xc9, 0x76, 0xbb, 0x92, 0x09, 0x89, 0x0e,
0x9b, 0x7b, 0xf7, 0x9a, 0xb8, 0x92, 0x55, 0xdf, 0x65, 0x37, 0x93, 0x18, 0xc6, 0x32, 0xbe, 0x83,
0x24, 0x43, 0x77, 0x10, 0xf5, 0xb7, 0x29, 0xa8, 0xc5, 0xd3, 0x10, 0xf4, 0xe1, 0xc4, 0xc4, 0xb7,
0xce, 0xc1, 0x61, 0x26, 0x66, 0x8f, 0xae, 0x43, 0xd9, 0x25, 0x47, 0x84, 0x76, 0xfb, 0x82, 0x16,
0x89, 0x23, 0xb3, 0x84, 0x4b, 0x52, 0xca, 0x8d, 0x3c, 0xa1, 0xf6, 0x19, 0xe9, 0x52, 0x4d, 0x60,
0x91, 0xd8, 0x74, 0x0a, 0x53, 0x63, 0xd2, 0x43, 0x21, 0x54, 0x3f, 0x3d, 0x57, 0x2c, 0x15, 0xc8,
0xe2, 0x66, 0x1b, 0x7f, 0x5c, 0x49, 0x23, 0x04, 0x65, 0xde, 0xd4, 0x0e, 0xf7, 0xb6, 0x0f, 0x0e,
0x5b, 0xfb, 0x2c, 0x96, 0x17, 0x60, 0xd9, 0x8f, 0xa5, 0x2f, 0xcc, 0xaa, 0x9f, 0x40, 0x39, 0x7a,
0xf7, 0x67, 0x21, 0x74, 0xed, 0x91, 0x65, 0xf0, 0x60, 0x64, 0xb1, 0xe8, 0xa0, 0xdb, 0x90, 0x3d,
0xb6, 0x45, 0x9a, 0xcd, 0xde, 0x6b, 0x8f, 0x6c, 0x4a, 0x42, 0xb5, 0x03, 0xa1, 0xad, 0x7e, 0x01,
0x59, 0x9e, 0x35, 0x2c, 0x03, 0xf8, 0x2d, 0x5e, 0x92, 0x2a, 0xd6, 0x46, 0x9f, 0x00, 0xe8, 0x94,
0xba, 0x66, 0x67, 0x34, 0x76, 0xbc, 0x3e, 0x3b, 0xeb, 0xb6, 0x7d, 0xbd, 0xfa, 0x65, 0x99, 0x7e,
0xab, 0x63, 0xd3, 0x50, 0x0a, 0x86, 0x1c, 0xaa, 0x7b, 0x50, 0x8e, 0xda, 0xfa, 0x34, 0x40, 0x8c,
0x21, 0x4a, 0x03, 0x04, 0xab, 0x93, 0x34, 0x20, 0x20, 0x11, 0x69, 0x51, 0xb1, 0xe1, 0x1d, 0xf5,
0x59, 0x12, 0x0a, 0xed, 0x13, 0xb9, 0x1e, 0x31, 0xc5, 0x82, 0xb1, 0x69, 0x2a, 0x7c, 0x35, 0x16,
0xd5, 0x87, 0x74, 0x50, 0xd3, 0xf8, 0x20, 0xd8, 0x71, 0x99, 0x45, 0x6f, 0x40, 0x7e, 0x71, 0x47,
0x66, 0xd9, 0xfb, 0xa0, 0x04, 0x98, 0xc9, 0xd8, 0xa9, 0x6e, 0x18, 0x2e, 0xf1, 0x3c, 0xb9, 0xef,
0xfd, 0x2e, 0xaf, 0x3d, 0xd9, 0x4f, 0xe5, 0xe5, 0x3b, 0x8d, 0x45, 0x47, 0x35, 0x60, 0x79, 0x02,
0x70, 0xd1, 0xfb, 0x90, 0x77, 0x46, 0x1d, 0xcd, 0x0f, 0xcf, 0xc4, 0x5b, 0x83, 0xcf, 0x7b, 0x46,
0x9d, 0x81, 0xd9, 0x7d, 0x40, 0x4e, 0xfd, 0xc1, 0x38, 0xa3, 0xce, 0x03, 0x11, 0x45, 0xf1, 0x2b,
0xa9, 0xf0, 0xaf, 0x1c, 0x43, 0xc1, 0xdf, 0x14, 0xe8, 0x87, 0xa0, 0x04, 0x58, 0x1e, 0x94, 0x24,
0x63, 0x0f, 0x01, 0xe9, 0x7e, 0x6c, 0xc2, 0x48, 0xb4, 0x67, 0xf6, 0x2c, 0x62, 0x68, 0x63, 0x7e,
0xcc, 0x7f, 0xad, 0x80, 0x97, 0xc5, 0x87, 0x5d, 0x9f, 0x1c, 0xab, 0xff, 0x4e, 0x42, 0xc1, 0x2f,
0x3d, 0xa1, 0x77, 0x43, 0xfb, 0xae, 0x3c, 0xe3, 0xa2, 0xee, 0x2b, 0x8e, 0xcb, 0x47, 0xd1, 0xb1,
0xa6, 0xce, 0x3f, 0xd6, 0xb8, 0x3a, 0xa0, 0x5f, 0x91, 0xcd, 0x9c, 0xbb, 0x22, 0xfb, 0x36, 0x20,
0x6a, 0x53, 0x7d, 0xa0, 0x1d, 0xdb, 0xd4, 0xb4, 0x7a, 0x9a, 0x08, 0xb6, 0xe0, 0x02, 0x15, 0xfe,
0xe5, 0x11, 0xff, 0x70, 0xc0, 0xe3, 0xfe, 0xcb, 0x24, 0x14, 0x02, 0x50, 0x3f, 0x6f, 0x35, 0xe8,
0x22, 0xe4, 0x24, 0x6e, 0x89, 0x72, 0x90, 0xec, 0x05, 0x85, 0xc9, 0x4c, 0xa8, 0x30, 0x59, 0x83,
0xc2, 0x90, 0x50, 0x9d, 0x9f, 0x6c, 0xe2, 0x8a, 0x12, 0xf4, 0x6f, 0xbe, 0x07, 0xc5, 0x50, 0x61,
0x8e, 0x65, 0xde, 0x5e, 0xf3, 0xa3, 0x4a, 0xa2, 0x96, 0x7f, 0xf6, 0xe5, 0xd5, 0xf4, 0x1e, 0x79,
0xca, 0xf6, 0x2c, 0x6e, 0x36, 0x5a, 0xcd, 0xc6, 0x83, 0x4a, 0xb2, 0x56, 0x7c, 0xf6, 0xe5, 0xd5,
0x3c, 0x26, 0xbc, 0x48, 0x70, 0xb3, 0x05, 0x4b, 0xe1, 0x55, 0x89, 0x42, 0x1f, 0x82, 0xf2, 0xbd,
0x87, 0x07, 0xbb, 0x3b, 0x8d, 0xed, 0x76, 0x53, 0x7b, 0xb4, 0xdf, 0x6e, 0x56, 0x92, 0xe8, 0x55,
0xb8, 0xb0, 0xbb, 0xf3, 0xe3, 0x56, 0x5b, 0x6b, 0xec, 0xee, 0x34, 0xf7, 0xda, 0xda, 0x76, 0xbb,
0xbd, 0xdd, 0x78, 0x50, 0x49, 0x6d, 0xfd, 0x5e, 0x81, 0xe5, 0xed, 0x7a, 0x63, 0x87, 0xc1, 0xb6,
0xd9, 0xd5, 0xf9, 0xfd, 0xb1, 0x01, 0x19, 0x7e, 0x43, 0x3c, 0xf3, 0xd9, 0xae, 0x76, 0x76, 0xf9,
0x08, 0xdd, 0x87, 0x2c, 0xbf, 0x3c, 0xa2, 0xb3, 0xdf, 0xf1, 0x6a, 0x73, 0xea, 0x49, 0x6c, 0x30,
0x3c, 0x3d, 0xce, 0x7c, 0xd8, 0xab, 0x9d, 0x5d, 0x5e, 0x42, 0x18, 0x94, 0x31, 0xf9, 0x9c, 0xff,
0xd0, 0x55, 0x5b, 0x00, 0x6c, 0xd0, 0x2e, 0xe4, 0xfd, 0xfb, 0xc2, 0xbc, 0xa7, 0xb7, 0xda, 0xdc,
0xfa, 0x0f, 0x0b, 0x97, 0xb8, 0xd7, 0x9d, 0xfd, 0x8e, 0x58, 0x9b, 0x53, 0xcc, 0x42, 0x3b, 0x90,
0x93, 0x84, 0x6a, 0xce, 0x73, 0x5a, 0x6d, 0x5e, 0x3d, 0x87, 0x05, 0x6d, 0x7c, 0x63, 0x9e, 0xff,
0x3a, 0x5a, 0x5b, 0xa0, 0x4e, 0x87, 0x1e, 0x02, 0x84, 0x6e, 0x71, 0x0b, 0x3c, 0x7b, 0xd6, 0x16,
0xa9, 0xbf, 0xa1, 0x7d, 0x28, 0x04, 0xa4, 0x7a, 0xee, 0x23, 0x64, 0x6d, 0x7e, 0x21, 0x0c, 0x3d,
0x86, 0x52, 0x94, 0x4c, 0x2e, 0xf6, 0xb4, 0x58, 0x5b, 0xb0, 0xc2, 0xc5, 0xfc, 0x47, 0x99, 0xe5,
0x62, 0x4f, 0x8d, 0xb5, 0x05, 0x0b, 0x5e, 0xe8, 0x33, 0x58, 0x99, 0x66, 0x7e, 0x8b, 0xbf, 0x3c,
0xd6, 0xce, 0x51, 0x02, 0x43, 0x43, 0x40, 0x33, 0x18, 0xe3, 0x39, 0x1e, 0x22, 0x6b, 0xe7, 0xa9,
0x88, 0xd5, 0x9b, 0x5f, 0xbd, 0x58, 0x4b, 0x7e, 0xfd, 0x62, 0x2d, 0xf9, 0x8f, 0x17, 0x6b, 0xc9,
0xe7, 0x2f, 0xd7, 0x12, 0x5f, 0xbf, 0x5c, 0x4b, 0xfc, 0xf5, 0xe5, 0x5a, 0xe2, 0x67, 0x6f, 0xf5,
0x4c, 0xda, 0x1f, 0x75, 0x36, 0xba, 0xf6, 0x70, 0x33, 0xfc, 0x0f, 0x87, 0x59, 0xff, 0xba, 0xe8,
0xe4, 0xf8, 0xa1, 0x72, 0xeb, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x92, 0xa5, 0x39, 0xcc, 0x95,
0x21, 0x00, 0x00,
// 2583 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x4b, 0x73, 0x1b, 0xc7,
0x11, 0xc6, 0x1b, 0xd8, 0x26, 0x01, 0x82, 0x23, 0x5a, 0x86, 0x60, 0x89, 0x94, 0x57, 0x25, 0xc7,
0x92, 0x6d, 0x32, 0xa6, 0x4a, 0x8a, 0x5c, 0xce, 0xc3, 0x04, 0x04, 0x05, 0xb4, 0x18, 0x92, 0x19,
0x42, 0x72, 0x39, 0x89, 0xb5, 0x5e, 0x60, 0x87, 0xc0, 0x5a, 0xc0, 0xee, 0x1a, 0x3b, 0xa0, 0x48,
0x1f, 0xf3, 0xb8, 0xa8, 0x72, 0xd0, 0x31, 0x17, 0x57, 0xe5, 0x1f, 0xe4, 0x9a, 0x53, 0x4e, 0x39,
0xf8, 0x90, 0x54, 0xf9, 0x98, 0x93, 0x93, 0x92, 0x6e, 0xf9, 0x03, 0x39, 0xa5, 0x2a, 0x35, 0x8f,
0x7d, 0x01, 0x58, 0x00, 0x8c, 0x73, 0xcb, 0x6d, 0xa6, 0xb7, 0xbb, 0x31, 0xd3, 0x33, 0xdd, 0xfd,
0x75, 0x0f, 0xe0, 0x35, 0x4a, 0x2c, 0x83, 0x0c, 0x07, 0xa6, 0x45, 0xb7, 0xf4, 0x76, 0xc7, 0xdc,
0xa2, 0x67, 0x0e, 0x71, 0x37, 0x9d, 0xa1, 0x4d, 0x6d, 0xb4, 0x12, 0x7c, 0xdc, 0x64, 0x1f, 0xab,
0x57, 0x42, 0xdc, 0x9d, 0xe1, 0x99, 0x43, 0xed, 0x2d, 0x67, 0x68, 0xdb, 0xc7, 0x82, 0xbf, 0x7a,
0x39, 0xf4, 0x99, 0xeb, 0x09, 0x6b, 0x8b, 0x7c, 0x95, 0xc2, 0x4f, 0xc8, 0x99, 0xf7, 0xf5, 0xca,
0x84, 0xac, 0xa3, 0x0f, 0xf5, 0x81, 0xf7, 0x79, 0xa3, 0x6b, 0xdb, 0xdd, 0x3e, 0xd9, 0xe2, 0xb3,
0xf6, 0xe8, 0x78, 0x8b, 0x9a, 0x03, 0xe2, 0x52, 0x7d, 0xe0, 0x48, 0x86, 0xb5, 0xae, 0xdd, 0xb5,
0xf9, 0x70, 0x8b, 0x8d, 0x04, 0x55, 0xfd, 0x6b, 0x1e, 0xf2, 0x98, 0x7c, 0x3e, 0x22, 0x2e, 0x45,
0xdb, 0x90, 0x21, 0x9d, 0x9e, 0x5d, 0x49, 0x5e, 0x4d, 0xbe, 0xb9, 0xb4, 0x7d, 0x79, 0x73, 0x6c,
0x73, 0x9b, 0x92, 0xaf, 0xd1, 0xe9, 0xd9, 0xcd, 0x04, 0xe6, 0xbc, 0xe8, 0x36, 0x64, 0x8f, 0xfb,
0x23, 0xb7, 0x57, 0x49, 0x71, 0xa1, 0x2b, 0x71, 0x42, 0xf7, 0x19, 0x53, 0x33, 0x81, 0x05, 0x37,
0xfb, 0x29, 0xd3, 0x3a, 0xb6, 0x2b, 0xe9, 0xd9, 0x3f, 0xb5, 0x6b, 0x1d, 0xf3, 0x9f, 0x62, 0xbc,
0xa8, 0x06, 0x60, 0x5a, 0x26, 0xd5, 0x3a, 0x3d, 0xdd, 0xb4, 0x2a, 0x19, 0x2e, 0xf9, 0x7a, 0xbc,
0xa4, 0x49, 0xeb, 0x8c, 0xb1, 0x99, 0xc0, 0x8a, 0xe9, 0x4d, 0xd8, 0x72, 0x3f, 0x1f, 0x91, 0xe1,
0x59, 0x25, 0x3b, 0x7b, 0xb9, 0x3f, 0x65, 0x4c, 0x6c, 0xb9, 0x9c, 0x1b, 0x35, 0x60, 0xa9, 0x4d,
0xba, 0xa6, 0xa5, 0xb5, 0xfb, 0x76, 0xe7, 0x49, 0x25, 0xc7, 0x85, 0xd5, 0x38, 0xe1, 0x1a, 0x63,
0xad, 0x31, 0xce, 0x66, 0x02, 0x43, 0xdb, 0x9f, 0xa1, 0xef, 0x43, 0xa1, 0xd3, 0x23, 0x9d, 0x27,
0x1a, 0x3d, 0xad, 0xe4, 0xb9, 0x8e, 0x8d, 0x38, 0x1d, 0x75, 0xc6, 0xd7, 0x3a, 0x6d, 0x26, 0x70,
0xbe, 0x23, 0x86, 0x6c, 0xff, 0x06, 0xe9, 0x9b, 0x27, 0x64, 0xc8, 0xe4, 0x0b, 0xb3, 0xf7, 0x7f,
0x4f, 0x70, 0x72, 0x0d, 0x8a, 0xe1, 0x4d, 0xd0, 0x8f, 0x40, 0x21, 0x96, 0x21, 0xb7, 0xa1, 0x70,
0x15, 0x57, 0x63, 0xcf, 0xd9, 0x32, 0xbc, 0x4d, 0x14, 0x88, 0x1c, 0xa3, 0xbb, 0x90, 0xeb, 0xd8,
0x83, 0x81, 0x49, 0x2b, 0xc0, 0xa5, 0xd7, 0x63, 0x37, 0xc0, 0xb9, 0x9a, 0x09, 0x2c, 0xf9, 0xd1,
0x3e, 0x94, 0xfa, 0xa6, 0x4b, 0x35, 0xd7, 0xd2, 0x1d, 0xb7, 0x67, 0x53, 0xb7, 0xb2, 0xc4, 0x35,
0x5c, 0x8f, 0xd3, 0xb0, 0x67, 0xba, 0xf4, 0xc8, 0x63, 0x6e, 0x26, 0x70, 0xb1, 0x1f, 0x26, 0x30,
0x7d, 0xf6, 0xf1, 0x31, 0x19, 0xfa, 0x0a, 0x2b, 0xcb, 0xb3, 0xf5, 0x1d, 0x30, 0x6e, 0x4f, 0x9e,
0xe9, 0xb3, 0xc3, 0x04, 0xf4, 0x73, 0xb8, 0xd0, 0xb7, 0x75, 0xc3, 0x57, 0xa7, 0x75, 0x7a, 0x23,
0xeb, 0x49, 0xa5, 0xc8, 0x95, 0xde, 0x88, 0x5d, 0xa4, 0xad, 0x1b, 0x9e, 0x8a, 0x3a, 0x13, 0x68,
0x26, 0xf0, 0x6a, 0x7f, 0x9c, 0x88, 0x1e, 0xc3, 0x9a, 0xee, 0x38, 0xfd, 0xb3, 0x71, 0xed, 0x25,
0xae, 0xfd, 0x66, 0x9c, 0xf6, 0x1d, 0x26, 0x33, 0xae, 0x1e, 0xe9, 0x13, 0xd4, 0x5a, 0x1e, 0xb2,
0x27, 0x7a, 0x7f, 0x44, 0xd4, 0xef, 0xc0, 0x52, 0xc8, 0x4d, 0x51, 0x05, 0xf2, 0x03, 0xe2, 0xba,
0x7a, 0x97, 0x70, 0xaf, 0x56, 0xb0, 0x37, 0x55, 0x4b, 0xb0, 0x1c, 0x76, 0x4d, 0xf5, 0x79, 0xd2,
0x97, 0x64, 0x5e, 0xc7, 0x24, 0x4f, 0xc8, 0xd0, 0x35, 0x6d, 0xcb, 0x93, 0x94, 0x53, 0x74, 0x0d,
0x8a, 0xfc, 0xfe, 0x68, 0xde, 0x77, 0xe6, 0xfa, 0x19, 0xbc, 0xcc, 0x89, 0x8f, 0x24, 0xd3, 0x06,
0x2c, 0x39, 0xdb, 0x8e, 0xcf, 0x92, 0xe6, 0x2c, 0xe0, 0x6c, 0x3b, 0x1e, 0xc3, 0xeb, 0xb0, 0xcc,
0x76, 0xea, 0x73, 0x64, 0xf8, 0x8f, 0x2c, 0x31, 0x9a, 0x64, 0x51, 0xff, 0x92, 0x82, 0xf2, 0xb8,
0x3b, 0xa3, 0xbb, 0x90, 0x61, 0x91, 0x4d, 0x06, 0xa9, 0xea, 0xa6, 0x08, 0x7b, 0x9b, 0x5e, 0xd8,
0xdb, 0x6c, 0x79, 0x61, 0xaf, 0x56, 0xf8, 0xea, 0x9b, 0x8d, 0xc4, 0xf3, 0xbf, 0x6f, 0x24, 0x31,
0x97, 0x40, 0x97, 0x98, 0xf7, 0xe9, 0xa6, 0xa5, 0x99, 0x06, 0x5f, 0xb2, 0xc2, 0x5c, 0x4b, 0x37,
0xad, 0x5d, 0x03, 0xed, 0x41, 0xb9, 0x63, 0x5b, 0x2e, 0xb1, 0xdc, 0x91, 0xab, 0x89, 0xb0, 0x2a,
0x43, 0x53, 0xc4, 0xc1, 0x44, 0xb0, 0xae, 0x7b, 0x9c, 0x87, 0x9c, 0x11, 0xaf, 0x74, 0xa2, 0x04,
0x74, 0x1f, 0xe0, 0x44, 0xef, 0x9b, 0x86, 0x4e, 0xed, 0xa1, 0x5b, 0xc9, 0x5c, 0x4d, 0x4f, 0xf5,
0xb2, 0x47, 0x1e, 0xcb, 0x43, 0xc7, 0xd0, 0x29, 0xa9, 0x65, 0xd8, 0x72, 0x71, 0x48, 0x12, 0xbd,
0x01, 0x2b, 0xba, 0xe3, 0x68, 0x2e, 0xd5, 0x29, 0xd1, 0xda, 0x67, 0x94, 0xb8, 0x3c, 0x6c, 0x2d,
0xe3, 0xa2, 0xee, 0x38, 0x47, 0x8c, 0x5a, 0x63, 0x44, 0x74, 0x1d, 0x4a, 0x2c, 0xc2, 0x99, 0x7a,
0x5f, 0xeb, 0x11, 0xb3, 0xdb, 0xa3, 0x3c, 0x40, 0xa5, 0x71, 0x51, 0x52, 0x9b, 0x9c, 0xa8, 0x1a,
0xfe, 0x89, 0xf3, 0xe8, 0x86, 0x10, 0x64, 0x0c, 0x9d, 0xea, 0xdc, 0x92, 0xcb, 0x98, 0x8f, 0x19,
0xcd, 0xd1, 0x69, 0x4f, 0xda, 0x87, 0x8f, 0xd1, 0x45, 0xc8, 0x49, 0xb5, 0x69, 0xae, 0x56, 0xce,
0xd0, 0x1a, 0x64, 0x9d, 0xa1, 0x7d, 0x42, 0xf8, 0xd1, 0x15, 0xb0, 0x98, 0xa8, 0xbf, 0x4e, 0xc1,
0xea, 0x44, 0x1c, 0x64, 0x7a, 0x7b, 0xba, 0xdb, 0xf3, 0x7e, 0x8b, 0x8d, 0xd1, 0x1d, 0xa6, 0x57,
0x37, 0xc8, 0x50, 0xe6, 0x8e, 0xca, 0xa4, 0xa9, 0x9b, 0xfc, 0xbb, 0x34, 0x8d, 0xe4, 0x46, 0x07,
0x50, 0xee, 0xeb, 0x2e, 0xd5, 0x44, 0x5c, 0xd1, 0x42, 0x79, 0x64, 0x32, 0x9a, 0xee, 0xe9, 0x5e,
0x24, 0x62, 0x97, 0x5a, 0x2a, 0x2a, 0xf5, 0x23, 0x54, 0x84, 0x61, 0xad, 0x7d, 0xf6, 0x85, 0x6e,
0x51, 0xd3, 0x22, 0xda, 0xc4, 0xc9, 0x5d, 0x9a, 0x50, 0xda, 0x38, 0x31, 0x0d, 0x62, 0x75, 0xbc,
0x23, 0xbb, 0xe0, 0x0b, 0xfb, 0x47, 0xea, 0xaa, 0x18, 0x4a, 0xd1, 0x48, 0x8e, 0x4a, 0x90, 0xa2,
0xa7, 0xd2, 0x00, 0x29, 0x7a, 0x8a, 0xbe, 0x0b, 0x19, 0xb6, 0x49, 0xbe, 0xf9, 0xd2, 0x94, 0x14,
0x28, 0xe5, 0x5a, 0x67, 0x0e, 0xc1, 0x9c, 0x53, 0x55, 0x7d, 0x77, 0xf0, 0xa3, 0xfb, 0xb8, 0x56,
0xf5, 0x06, 0xac, 0x8c, 0x85, 0xef, 0xd0, 0xf9, 0x25, 0xc3, 0xe7, 0xa7, 0xae, 0x40, 0x31, 0x12,
0xab, 0xd5, 0x8b, 0xb0, 0x36, 0x2d, 0xf4, 0xaa, 0x3d, 0x9f, 0x1e, 0x09, 0xa1, 0xe8, 0x36, 0x14,
0xfc, 0xd8, 0x2b, 0xdc, 0x71, 0xd2, 0x56, 0x1e, 0x33, 0xf6, 0x59, 0x99, 0x1f, 0xb2, 0x6b, 0xcd,
0xef, 0x43, 0x8a, 0x2f, 0x3c, 0xaf, 0x3b, 0x4e, 0x53, 0x77, 0x7b, 0xea, 0xa7, 0x50, 0x89, 0x8b,
0xab, 0x63, 0xdb, 0xc8, 0xf8, 0xd7, 0xf0, 0x22, 0xe4, 0x8e, 0xed, 0xe1, 0x40, 0xa7, 0x5c, 0x59,
0x11, 0xcb, 0x19, 0xbb, 0x9e, 0x22, 0xc6, 0xa6, 0x39, 0x59, 0x4c, 0x54, 0x0d, 0x2e, 0xc5, 0xc6,
0x56, 0x26, 0x62, 0x5a, 0x06, 0x11, 0xf6, 0x2c, 0x62, 0x31, 0x09, 0x14, 0x89, 0xc5, 0x8a, 0x09,
0xfb, 0x59, 0x97, 0xef, 0x95, 0xeb, 0x57, 0xb0, 0x9c, 0xa9, 0xbf, 0x2f, 0x40, 0x01, 0x13, 0xd7,
0x61, 0x31, 0x01, 0xd5, 0x40, 0x21, 0xa7, 0x1d, 0xe2, 0x50, 0x2f, 0x8c, 0x4e, 0x47, 0x0d, 0x82,
0xbb, 0xe1, 0x71, 0xb2, 0x94, 0xed, 0x8b, 0xa1, 0x5b, 0x12, 0x95, 0xc5, 0x03, 0x2c, 0x29, 0x1e,
0x86, 0x65, 0x77, 0x3c, 0x58, 0x96, 0x8e, 0xcd, 0xd2, 0x42, 0x6a, 0x0c, 0x97, 0xdd, 0x92, 0xb8,
0x2c, 0x33, 0xe7, 0xc7, 0x22, 0xc0, 0xac, 0x1e, 0x01, 0x66, 0xd9, 0x39, 0xdb, 0x8c, 0x41, 0x66,
0x77, 0x3c, 0x64, 0x96, 0x9b, 0xb3, 0xe2, 0x31, 0x68, 0x76, 0x3f, 0x0a, 0xcd, 0x04, 0xac, 0xba,
0x16, 0x2b, 0x1d, 0x8b, 0xcd, 0x7e, 0x10, 0xc2, 0x66, 0x85, 0x58, 0x60, 0x24, 0x94, 0x4c, 0x01,
0x67, 0xf5, 0x08, 0x38, 0x53, 0xe6, 0xd8, 0x20, 0x06, 0x9d, 0x7d, 0x10, 0x46, 0x67, 0x10, 0x0b,
0xf0, 0xe4, 0x79, 0x4f, 0x83, 0x67, 0xef, 0xf9, 0xf0, 0x6c, 0x29, 0x16, 0x5f, 0xca, 0x3d, 0x8c,
0xe3, 0xb3, 0x83, 0x09, 0x7c, 0x26, 0xf0, 0xd4, 0x1b, 0xb1, 0x2a, 0xe6, 0x00, 0xb4, 0x83, 0x09,
0x80, 0x56, 0x9c, 0xa3, 0x70, 0x0e, 0x42, 0xfb, 0xc5, 0x74, 0x84, 0x16, 0x8f, 0xa1, 0xe4, 0x32,
0x17, 0x83, 0x68, 0x5a, 0x0c, 0x44, 0x5b, 0xe1, 0xea, 0xdf, 0x8a, 0x55, 0x7f, 0x7e, 0x8c, 0x76,
0x83, 0x65, 0xc8, 0x31, 0x9f, 0x67, 0x51, 0x86, 0x0c, 0x87, 0xf6, 0x50, 0xa2, 0x2d, 0x31, 0x51,
0xdf, 0x64, 0x39, 0x3b, 0xf0, 0xef, 0x19, 0x78, 0x8e, 0x47, 0xf3, 0x90, 0x4f, 0xab, 0x7f, 0x4c,
0x06, 0xb2, 0x3c, 0xcd, 0x85, 0xf3, 0xbd, 0x22, 0xf3, 0x7d, 0x08, 0xe5, 0xa5, 0xa2, 0x28, 0x6f,
0x03, 0x96, 0x58, 0x94, 0x1e, 0x03, 0x70, 0xba, 0xe3, 0x03, 0xb8, 0x9b, 0xb0, 0xca, 0xd3, 0xb0,
0xc0, 0x82, 0x32, 0x34, 0x67, 0x78, 0x86, 0x59, 0x61, 0x1f, 0xc4, 0xe5, 0x14, 0x31, 0xfa, 0x1d,
0xb8, 0x10, 0xe2, 0xf5, 0xa3, 0xbf, 0x40, 0x33, 0x65, 0x9f, 0x7b, 0x47, 0xa6, 0x81, 0x3f, 0x27,
0x03, 0x0b, 0x05, 0xc8, 0x6f, 0x1a, 0x48, 0x4b, 0xfe, 0x8f, 0x40, 0x5a, 0xea, 0xbf, 0x06, 0x69,
0xe1, 0x6c, 0x96, 0x8e, 0x66, 0xb3, 0x7f, 0x25, 0x83, 0x33, 0xf1, 0x21, 0x57, 0xc7, 0x36, 0x88,
0xcc, 0x2f, 0x7c, 0x8c, 0xca, 0x90, 0xee, 0xdb, 0x5d, 0x99, 0x45, 0xd8, 0x90, 0x71, 0xf9, 0x41,
0x58, 0x91, 0x31, 0xd6, 0x4f, 0x4d, 0x59, 0x6e, 0x61, 0x99, 0x9a, 0xca, 0x90, 0x7e, 0x42, 0x44,
0xc8, 0x5c, 0xc6, 0x6c, 0xc8, 0xf8, 0xf8, 0x25, 0xe3, 0x81, 0x70, 0x19, 0x8b, 0x09, 0xba, 0x0b,
0x0a, 0x6f, 0x43, 0x68, 0xb6, 0xe3, 0xca, 0xe8, 0xf6, 0x5a, 0x78, 0xaf, 0xa2, 0xdb, 0xb0, 0x79,
0xc8, 0x78, 0x0e, 0x1c, 0x17, 0x17, 0x1c, 0x39, 0x0a, 0x65, 0x5d, 0x25, 0x02, 0xfe, 0x2e, 0x83,
0xc2, 0x56, 0xef, 0x3a, 0x7a, 0x87, 0xf0, 0x50, 0xa5, 0xe0, 0x80, 0xa0, 0x3e, 0x06, 0x34, 0x19,
0x70, 0x51, 0x13, 0x72, 0xe4, 0x84, 0x58, 0x94, 0x1d, 0x1b, 0x33, 0xf7, 0xc5, 0x29, 0xc8, 0x8a,
0x58, 0xb4, 0x56, 0x61, 0x46, 0xfe, 0xe7, 0x37, 0x1b, 0x65, 0xc1, 0xfd, 0xb6, 0x3d, 0x30, 0x29,
0x19, 0x38, 0xf4, 0x0c, 0x4b, 0x79, 0xf5, 0x57, 0x29, 0x06, 0x73, 0x22, 0xc1, 0x78, 0xaa, 0x6d,
0xbd, 0x2b, 0x9f, 0x0a, 0x41, 0xdc, 0xc5, 0xec, 0xbd, 0x0e, 0xd0, 0xd5, 0x5d, 0xed, 0xa9, 0x6e,
0x51, 0x62, 0x48, 0xa3, 0x87, 0x28, 0xa8, 0x0a, 0x05, 0x36, 0x1b, 0xb9, 0xc4, 0x90, 0x68, 0xdb,
0x9f, 0x87, 0xf6, 0x99, 0xff, 0x76, 0xfb, 0x8c, 0x5a, 0xb9, 0x30, 0x6e, 0xe5, 0xdf, 0xa4, 0x02,
0x37, 0x09, 0x10, 0xe1, 0xff, 0x9f, 0x1d, 0x7e, 0xcb, 0xeb, 0xc4, 0x68, 0x56, 0x44, 0x47, 0xb0,
0xea, 0x7b, 0xa9, 0x36, 0xe2, 0xde, 0xeb, 0xdd, 0xbb, 0x45, 0xdd, 0xbc, 0x7c, 0x12, 0x25, 0xbb,
0xe8, 0x63, 0x78, 0x75, 0x2c, 0x04, 0xf9, 0xaa, 0x53, 0x8b, 0x46, 0xa2, 0x57, 0xa2, 0x91, 0xc8,
0x53, 0x1d, 0x18, 0x2b, 0xfd, 0x2d, 0x9d, 0x63, 0x97, 0x95, 0x1e, 0xe1, 0x24, 0x3f, 0xf5, 0xf8,
0xaf, 0x41, 0x71, 0x48, 0x28, 0x2b, 0x87, 0x23, 0xc5, 0xdd, 0xb2, 0x20, 0xca, 0x92, 0xf1, 0x10,
0x5e, 0x99, 0x9a, 0xec, 0xd1, 0xf7, 0x40, 0x09, 0x70, 0x42, 0x32, 0xa6, 0x4e, 0xf2, 0xb1, 0x7f,
0xc0, 0xab, 0xfe, 0x29, 0x19, 0xa8, 0x8c, 0x56, 0x13, 0x0d, 0xc8, 0x0d, 0x89, 0x3b, 0xea, 0x0b,
0x7c, 0x5f, 0xda, 0x7e, 0x67, 0x31, 0x98, 0xc0, 0xa8, 0xa3, 0x3e, 0xc5, 0x52, 0x58, 0x7d, 0x0c,
0x39, 0x41, 0x41, 0x4b, 0x90, 0x7f, 0xb8, 0xff, 0x60, 0xff, 0xe0, 0xa3, 0xfd, 0x72, 0x02, 0x01,
0xe4, 0x76, 0xea, 0xf5, 0xc6, 0x61, 0xab, 0x9c, 0x44, 0x0a, 0x64, 0x77, 0x6a, 0x07, 0xb8, 0x55,
0x4e, 0x31, 0x32, 0x6e, 0x7c, 0xd8, 0xa8, 0xb7, 0xca, 0x69, 0xb4, 0x0a, 0x45, 0x31, 0xd6, 0xee,
0x1f, 0xe0, 0x9f, 0xec, 0xb4, 0xca, 0x99, 0x10, 0xe9, 0xa8, 0xb1, 0x7f, 0xaf, 0x81, 0xcb, 0x59,
0xf5, 0x5d, 0x56, 0x40, 0xc4, 0x00, 0x8b, 0xa0, 0x54, 0x48, 0x86, 0x4a, 0x05, 0xf5, 0x77, 0x29,
0xa8, 0xc6, 0xa3, 0x05, 0xf4, 0xe1, 0xd8, 0xc6, 0xb7, 0xcf, 0x01, 0x35, 0xc6, 0x76, 0x8f, 0xae,
0x43, 0x69, 0x48, 0x8e, 0x09, 0xed, 0xf4, 0x04, 0x7a, 0x11, 0x99, 0xad, 0x88, 0x8b, 0x92, 0xca,
0x85, 0x5c, 0xc1, 0xf6, 0x19, 0xe9, 0x50, 0x4d, 0x54, 0x2d, 0xe2, 0xd2, 0x29, 0x8c, 0x8d, 0x51,
0x8f, 0x04, 0x51, 0xfd, 0xf4, 0x5c, 0xb6, 0x54, 0x20, 0x8b, 0x1b, 0x2d, 0xfc, 0x71, 0x39, 0x8d,
0x10, 0x94, 0xf8, 0x50, 0x3b, 0xda, 0xdf, 0x39, 0x3c, 0x6a, 0x1e, 0x30, 0x5b, 0x5e, 0x80, 0x15,
0xcf, 0x96, 0x1e, 0x31, 0xab, 0x7e, 0x02, 0xa5, 0x68, 0x89, 0xce, 0x4c, 0x38, 0xb4, 0x47, 0x96,
0xc1, 0x8d, 0x91, 0xc5, 0x62, 0x82, 0x6e, 0x43, 0xf6, 0xc4, 0x16, 0x6e, 0x36, 0xfd, 0xae, 0x3d,
0xb2, 0x29, 0x09, 0x95, 0xf8, 0x82, 0x5b, 0xfd, 0x02, 0xb2, 0xdc, 0x6b, 0x98, 0x07, 0xf0, 0x62,
0x5b, 0x62, 0x1f, 0x36, 0x46, 0x9f, 0x00, 0xe8, 0x94, 0x0e, 0xcd, 0xf6, 0x28, 0x50, 0xbc, 0x31,
0xdd, 0xeb, 0x76, 0x3c, 0xbe, 0xda, 0x65, 0xe9, 0x7e, 0x6b, 0x81, 0x68, 0xc8, 0x05, 0x43, 0x0a,
0xd5, 0x7d, 0x28, 0x45, 0x65, 0xbd, 0x6c, 0x9d, 0x9c, 0x92, 0xad, 0x53, 0xe1, 0x6c, 0xed, 0xe7,
0xfa, 0xb4, 0x68, 0xac, 0xf0, 0x89, 0xfa, 0x2c, 0x09, 0x85, 0xd6, 0xa9, 0x3c, 0x8f, 0x98, 0x9a,
0x3e, 0x10, 0x4d, 0x85, 0x2b, 0x58, 0xd1, 0x24, 0x48, 0xfb, 0xad, 0x87, 0x0f, 0xfc, 0x1b, 0x97,
0x59, 0xb4, 0x50, 0xf1, 0x7a, 0x30, 0xd2, 0xcb, 0xde, 0x07, 0xc5, 0x8f, 0x99, 0x0c, 0x44, 0xea,
0x86, 0x31, 0x24, 0xae, 0x2b, 0xf7, 0xe6, 0x4d, 0x79, 0x8b, 0xc8, 0x7e, 0x2a, 0x6b, 0xe4, 0x34,
0x16, 0x13, 0xd5, 0x80, 0x95, 0xb1, 0x80, 0x8b, 0xde, 0x87, 0xbc, 0x33, 0x6a, 0x6b, 0x9e, 0x79,
0xc6, 0x9e, 0x04, 0x3c, 0x78, 0x32, 0x6a, 0xf7, 0xcd, 0xce, 0x03, 0x72, 0xe6, 0x2d, 0xc6, 0x19,
0xb5, 0x1f, 0x08, 0x2b, 0x8a, 0x5f, 0x49, 0x85, 0x7f, 0xe5, 0x04, 0x0a, 0xde, 0xa5, 0x40, 0x3f,
0x04, 0xc5, 0x8f, 0xe5, 0x7e, 0xe7, 0x30, 0x36, 0x09, 0x48, 0xf5, 0x81, 0x08, 0xc3, 0xba, 0xae,
0xd9, 0xb5, 0x88, 0xa1, 0x05, 0x30, 0x96, 0xff, 0x5a, 0x01, 0xaf, 0x88, 0x0f, 0x7b, 0x1e, 0x86,
0x55, 0xff, 0x9d, 0x84, 0x82, 0xd7, 0x21, 0x42, 0xef, 0x86, 0xee, 0x5d, 0x69, 0x4a, 0x3d, 0xed,
0x31, 0x06, 0x5d, 0x9e, 0xe8, 0x5a, 0x53, 0xe7, 0x5f, 0x6b, 0x5c, 0xbb, 0xce, 0x6b, 0x9c, 0x66,
0xce, 0xdd, 0x38, 0x7d, 0x1b, 0x10, 0xb5, 0xa9, 0xde, 0xd7, 0x4e, 0x6c, 0x6a, 0x5a, 0x5d, 0x4d,
0x18, 0x5b, 0x60, 0x81, 0x32, 0xff, 0xf2, 0x88, 0x7f, 0x38, 0xe4, 0x76, 0xff, 0x65, 0x12, 0x0a,
0x7e, 0x50, 0x3f, 0x6f, 0xd3, 0xe6, 0x22, 0xe4, 0x64, 0xdc, 0x12, 0x5d, 0x1b, 0x39, 0xf3, 0xfb,
0x87, 0x99, 0x50, 0xff, 0xb0, 0x0a, 0x85, 0x01, 0xa1, 0x3a, 0xcf, 0x6c, 0xa2, 0x92, 0xf0, 0xe7,
0x37, 0xdf, 0x83, 0xa5, 0x50, 0xff, 0x8c, 0x79, 0xde, 0x7e, 0xe3, 0xa3, 0x72, 0xa2, 0x9a, 0x7f,
0xf6, 0xe5, 0xd5, 0xf4, 0x3e, 0x79, 0xca, 0xee, 0x2c, 0x6e, 0xd4, 0x9b, 0x8d, 0xfa, 0x83, 0x72,
0xb2, 0xba, 0xf4, 0xec, 0xcb, 0xab, 0x79, 0x4c, 0x78, 0x2d, 0x7f, 0xb3, 0x09, 0xcb, 0xe1, 0x53,
0x89, 0x86, 0x3e, 0x04, 0xa5, 0x7b, 0x0f, 0x0f, 0xf7, 0x76, 0xeb, 0x3b, 0xad, 0x86, 0xf6, 0xe8,
0xa0, 0xd5, 0x28, 0x27, 0xd1, 0xab, 0x70, 0x61, 0x6f, 0xf7, 0xc7, 0xcd, 0x96, 0x56, 0xdf, 0xdb,
0x6d, 0xec, 0xb7, 0xb4, 0x9d, 0x56, 0x6b, 0xa7, 0xfe, 0xa0, 0x9c, 0xda, 0xfe, 0x83, 0x02, 0x2b,
0x3b, 0xb5, 0xfa, 0x2e, 0x0b, 0xdb, 0x66, 0x47, 0xe7, 0x65, 0x5e, 0x1d, 0x32, 0xbc, 0x90, 0x9b,
0xf9, 0xba, 0x56, 0x9d, 0xdd, 0xe5, 0x41, 0xf7, 0x21, 0xcb, 0x6b, 0x3c, 0x34, 0xfb, 0xb9, 0xad,
0x3a, 0xa7, 0xed, 0xc3, 0x16, 0xc3, 0xdd, 0x63, 0xe6, 0xfb, 0x5b, 0x75, 0x76, 0x17, 0x08, 0x61,
0x50, 0x02, 0xf0, 0x39, 0xff, 0x3d, 0xaa, 0xba, 0x40, 0xb0, 0x41, 0x7b, 0x90, 0xf7, 0x60, 0xfd,
0xbc, 0x17, 0xb2, 0xea, 0xdc, 0x36, 0x0d, 0x33, 0x97, 0x28, 0xbf, 0x66, 0x3f, 0xf7, 0x55, 0xe7,
0xf4, 0x9c, 0xd0, 0x2e, 0xe4, 0x24, 0xa0, 0x9a, 0xf3, 0xea, 0x55, 0x9d, 0xd7, 0x76, 0x61, 0x46,
0x0b, 0x0a, 0xdb, 0xf9, 0x8f, 0x98, 0xd5, 0x05, 0xda, 0x69, 0xe8, 0x21, 0x40, 0xa8, 0xd8, 0x5a,
0xe0, 0x75, 0xb2, 0xba, 0x48, 0x9b, 0x0c, 0x1d, 0x40, 0xc1, 0x07, 0xd5, 0x73, 0xdf, 0x0a, 0xab,
0xf3, 0xfb, 0x55, 0xe8, 0x31, 0x14, 0xa3, 0x60, 0x72, 0xb1, 0x17, 0xc0, 0xea, 0x82, 0x8d, 0x28,
0xa6, 0x3f, 0x8a, 0x2c, 0x17, 0x7b, 0x11, 0xac, 0x2e, 0xd8, 0x97, 0x42, 0x9f, 0xc1, 0xea, 0x24,
0xf2, 0x5b, 0xfc, 0x81, 0xb0, 0x7a, 0x8e, 0x4e, 0x15, 0x1a, 0x00, 0x9a, 0x82, 0x18, 0xcf, 0xf1,
0x5e, 0x58, 0x3d, 0x4f, 0xe3, 0xaa, 0xd6, 0xf8, 0xea, 0xc5, 0x7a, 0xf2, 0xeb, 0x17, 0xeb, 0xc9,
0x7f, 0xbc, 0x58, 0x4f, 0x3e, 0x7f, 0xb9, 0x9e, 0xf8, 0xfa, 0xe5, 0x7a, 0xe2, 0x6f, 0x2f, 0xd7,
0x13, 0x3f, 0x7b, 0xab, 0x6b, 0xd2, 0xde, 0xa8, 0xbd, 0xd9, 0xb1, 0x07, 0x5b, 0xe1, 0x3f, 0x22,
0x4c, 0xfb, 0x73, 0x44, 0x3b, 0xc7, 0x93, 0xca, 0xad, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x63,
0x45, 0xfb, 0xc8, 0x3c, 0x21, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@@ -5241,25 +5212,6 @@ func (m *ResponseCheckTx) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if len(m.MempoolError) > 0 {
i -= len(m.MempoolError)
copy(dAtA[i:], m.MempoolError)
i = encodeVarintTypes(dAtA, i, uint64(len(m.MempoolError)))
i--
dAtA[i] = 0x5a
}
if m.Priority != 0 {
i = encodeVarintTypes(dAtA, i, uint64(m.Priority))
i--
dAtA[i] = 0x50
}
if len(m.Sender) > 0 {
i -= len(m.Sender)
copy(dAtA[i:], m.Sender)
i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender)))
i--
dAtA[i] = 0x4a
}
if len(m.Codespace) > 0 {
i -= len(m.Codespace)
copy(dAtA[i:], m.Codespace)
@@ -6854,17 +6806,6 @@ func (m *ResponseCheckTx) Size() (n int) {
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
l = len(m.Sender)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
if m.Priority != 0 {
n += 1 + sovTypes(uint64(m.Priority))
}
l = len(m.MempoolError)
if l > 0 {
n += 1 + l + sovTypes(uint64(l))
}
return n
}
@@ -11079,89 +11020,6 @@ func (m *ResponseCheckTx) Unmarshal(dAtA []byte) error {
}
m.Codespace = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 9:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Sender = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 10:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType)
}
m.Priority = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Priority |= int64(b&0x7F) << shift
if b < 0x80 {
break
}
}
case 11:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field MempoolError", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.MempoolError = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipTypes(dAtA[iNdEx:])
@@ -12377,7 +12235,7 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error {
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var stringLen uint64
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
@@ -12387,29 +12245,31 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
if byteLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = string(dAtA[iNdEx:postIndex])
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
}
var stringLen uint64
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowTypes
@@ -12419,23 +12279,25 @@ func (m *EventAttribute) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= uint64(b&0x7F) << shift
byteLen |= int(b&0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
if byteLen < 0 {
return ErrInvalidLengthTypes
}
postIndex := iNdEx + intStringLen
postIndex := iNdEx + byteLen
if postIndex < 0 {
return ErrInvalidLengthTypes
}
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Value = string(dAtA[iNdEx:postIndex])
m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
if m.Value == nil {
m.Value = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 0 {

17
blockchain/doc.go Normal file
View File

@@ -0,0 +1,17 @@
/*
Package blockchain provides two implementations of the fast-sync protocol.
- v0 was the very first implementation. It is battle tested, but does not have a
lot of test coverage.
- v2 is the newest implementation, with a focus on testability and readability.
Check out ADR-40 for the formal model and requirements.
# Termination criteria
1. The maximum peer height is reached.
2. The termination timeout is triggered, which is set when the peer set is empty or
there are no pending requests (see the sketch after this file).
*/
package blockchain
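
A rough sketch of the two termination checks described in the package comment; every name below is illustrative rather than the reactor's actual API:

package main

import (
	"fmt"
	"time"
)

// shouldStopFastSync reports whether fast sync can terminate under the two
// criteria above: the tallest known peer height has been reached, or the
// termination timeout fired while no progress is possible.
func shouldStopFastSync(height, maxPeerHeight int64, numPeers, numPending int, idle, timeout time.Duration) bool {
	// 1. The maximum peer height is reached.
	if maxPeerHeight > 0 && height >= maxPeerHeight {
		return true
	}
	// 2. The termination timeout triggered while the peer set is empty or
	//    there are no pending requests.
	if (numPeers == 0 || numPending == 0) && idle > timeout {
		return true
	}
	return false
}

func main() {
	fmt.Println(shouldStopFastSync(100, 100, 3, 0, 0, time.Minute)) // true: caught up
}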

View File

@@ -1,7 +1,7 @@
package blocksync
package blockchain
import (
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/types"
)

View File

@@ -7,10 +7,11 @@ import (
"sync/atomic"
"time"
flow "github.com/tendermint/tendermint/internal/libs/flowrate"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
flow "github.com/tendermint/tendermint/libs/flowrate"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -62,10 +63,10 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
// PeerID responsible for delivering the block.
type BlockRequest struct {
Height int64
PeerID types.NodeID
PeerID p2p.NodeID
}
// BlockPool keeps track of the block sync peers, block requests and block responses.
// BlockPool keeps track of the fast sync peers, block requests and block responses.
type BlockPool struct {
service.BaseService
lastAdvance time.Time
@@ -75,7 +76,7 @@ type BlockPool struct {
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
// peers
peers map[types.NodeID]*bpPeer
peers map[p2p.NodeID]*bpPeer
maxPeerHeight int64 // the biggest reported height
// atomic
@@ -83,26 +84,20 @@ type BlockPool struct {
requestsCh chan<- BlockRequest
errorsCh chan<- peerError
startHeight int64
lastHundredBlockTimeStamp time.Time
lastSyncRate float64
}
// NewBlockPool returns a new BlockPool with the height equal to start. Block
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
bp := &BlockPool{
peers: make(map[types.NodeID]*bpPeer),
peers: make(map[p2p.NodeID]*bpPeer),
requesters: make(map[int64]*bpRequester),
height: start,
startHeight: start,
numPending: 0,
requesters: make(map[int64]*bpRequester),
height: start,
numPending: 0,
requestsCh: requestsCh,
errorsCh: errorsCh,
lastSyncRate: 0,
requestsCh: requestsCh,
errorsCh: errorsCh,
}
bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
return bp
@@ -112,7 +107,6 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// pool's start time.
func (pool *BlockPool) OnStart() error {
pool.lastAdvance = time.Now()
pool.lastHundredBlockTimeStamp = pool.lastAdvance
go pool.makeRequestersRoutine()
return nil
}
@@ -223,19 +217,6 @@ func (pool *BlockPool) PopRequest() {
delete(pool.requesters, pool.height)
pool.height++
pool.lastAdvance = time.Now()
// lastSyncRate is updated every 100 blocks; it uses an adaptive filter
// to smooth the block sync rate, expressed in blocks per second.
if (pool.height-pool.startHeight)%100 == 0 {
newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
if pool.lastSyncRate == 0 {
pool.lastSyncRate = newSyncRate
} else {
pool.lastSyncRate = 0.9*pool.lastSyncRate + 0.1*newSyncRate
}
pool.lastHundredBlockTimeStamp = time.Now()
}
} else {
panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
}
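
The sync-rate logic in the hunk above is a simple exponential moving average refreshed every 100 blocks. A standalone sketch of that update (names are illustrative, not the pool's API):

package main

import (
	"fmt"
	"time"
)

// updateSyncRate applies the adaptive filter shown above: the new observation
// (blocks per second over the last 100 blocks) is blended into the running
// rate with weights 0.9/0.1.
func updateSyncRate(lastRate float64, sinceLastHundred time.Duration) float64 {
	newRate := 100 / sinceLastHundred.Seconds()
	if lastRate == 0 {
		return newRate
	}
	return 0.9*lastRate + 0.1*newRate
}

func main() {
	rate := updateSyncRate(0, 50*time.Second)   // first window: 2 blocks/s
	rate = updateSyncRate(rate, 25*time.Second) // next window blends toward 4 blocks/s
	fmt.Printf("%.2f blocks/s\n", rate)         // 2.20 blocks/s
}

The 0.9/0.1 weighting keeps the reported rate stable while still reacting to recent changes in throughput.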
@@ -244,13 +225,13 @@ func (pool *BlockPool) PopRequest() {
// RedoRequest invalidates the block at pool.height, removes the peer,
// and redoes the request from other peers.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
pool.mtx.Lock()
defer pool.mtx.Unlock()
request := pool.requesters[height]
peerID := request.getPeerID()
if peerID != types.NodeID("") {
if peerID != p2p.NodeID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
}
@@ -259,7 +240,7 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *types.Block, blockSize int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -306,7 +287,7 @@ func (pool *BlockPool) LastAdvance() time.Time {
}
// SetPeerRange sets the peer's alleged blockchain base and height.
func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) {
func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
@@ -327,14 +308,14 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6
// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, the function is a no-op.
func (pool *BlockPool) RemovePeer(peerID types.NodeID) {
func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
pool.mtx.Lock()
defer pool.mtx.Unlock()
pool.removePeer(peerID)
}
func (pool *BlockPool) removePeer(peerID types.NodeID) {
func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
requester.redo(peerID)
@@ -415,14 +396,14 @@ func (pool *BlockPool) requestersLen() int64 {
return int64(len(pool.requesters))
}
func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) {
func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
if !pool.IsRunning() {
return
}
pool.requestsCh <- BlockRequest{height, peerID}
}
func (pool *BlockPool) sendError(err error, peerID types.NodeID) {
func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
if !pool.IsRunning() {
return
}
@@ -448,20 +429,6 @@ func (pool *BlockPool) debug() string {
return str
}
func (pool *BlockPool) targetSyncBlocks() int64 {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
return pool.maxPeerHeight - pool.startHeight + 1
}
func (pool *BlockPool) getLastSyncRate() float64 {
pool.mtx.RLock()
defer pool.mtx.RUnlock()
return pool.lastSyncRate
}
//-------------------------------------
type bpPeer struct {
@@ -470,7 +437,7 @@ type bpPeer struct {
height int64
base int64
pool *BlockPool
id types.NodeID
id p2p.NodeID
recvMonitor *flow.Monitor
timeout *time.Timer
@@ -478,7 +445,7 @@ type bpPeer struct {
logger log.Logger
}
func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer {
func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
peer := &bpPeer{
pool: pool,
id: peerID,
@@ -543,10 +510,10 @@ type bpRequester struct {
pool *BlockPool
height int64
gotBlockCh chan struct{}
redoCh chan types.NodeID // redo may be sent multiple times; the peerID identifies repeats
redoCh chan p2p.NodeID // redo may be sent multiple times; the peerID identifies repeats
mtx tmsync.Mutex
peerID types.NodeID
peerID p2p.NodeID
block *types.Block
}
@@ -555,7 +522,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
pool: pool,
height: height,
gotBlockCh: make(chan struct{}, 1),
redoCh: make(chan types.NodeID, 1),
redoCh: make(chan p2p.NodeID, 1),
peerID: "",
block: nil,
@@ -570,7 +537,7 @@ func (bpr *bpRequester) OnStart() error {
}
// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.NodeID) bool {
bpr.mtx.Lock()
if bpr.block != nil || bpr.peerID != peerID {
bpr.mtx.Unlock()
@@ -592,7 +559,7 @@ func (bpr *bpRequester) getBlock() *types.Block {
return bpr.block
}
func (bpr *bpRequester) getPeerID() types.NodeID {
func (bpr *bpRequester) getPeerID() p2p.NodeID {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
return bpr.peerID
@@ -614,7 +581,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerID types.NodeID) {
func (bpr *bpRequester) redo(peerID p2p.NodeID) {
select {
case bpr.redoCh <- peerID:
default:

View File

@@ -2,7 +2,6 @@ package v0
import (
"fmt"
mrand "math/rand"
"testing"
"time"
@@ -11,6 +10,7 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -19,7 +19,7 @@ func init() {
}
type testPeer struct {
id types.NodeID
id p2p.NodeID
base int64
height int64
inputChan chan inputData // make sure each peer's data is sequential
@@ -49,7 +49,7 @@ func (p testPeer) simulateInput(input inputData) {
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}
type testPeers map[types.NodeID]testPeer
type testPeers map[p2p.NodeID]testPeer
func (ps testPeers) start() {
for _, v := range ps {
@@ -66,8 +66,8 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
peers := make(testPeers, numPeers)
for i := 0; i < numPeers; i++ {
peerID := types.NodeID(tmrand.Str(12))
height := minHeight + mrand.Int63n(maxHeight-minHeight)
peerID := p2p.NodeID(tmrand.Str(12))
height := minHeight + tmrand.Int63n(maxHeight-minHeight)
base := minHeight + int64(i)
if base > height {
base = height
@@ -182,7 +182,7 @@ func TestBlockPoolTimeout(t *testing.T) {
// Pull from channels
counter := 0
timedOut := map[types.NodeID]struct{}{}
timedOut := map[p2p.NodeID]struct{}{}
for {
select {
case err := <-errorsCh:
@@ -203,7 +203,7 @@ func TestBlockPoolTimeout(t *testing.T) {
func TestBlockPoolRemovePeer(t *testing.T) {
peers := make(testPeers, 10)
for i := 0; i < 10; i++ {
peerID := types.NodeID(fmt.Sprintf("%d", i+1))
peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
height := int64(i + 1)
peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
}
@@ -227,10 +227,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
assert.EqualValues(t, 10, pool.MaxPeerHeight())
// remove not-existing peer
assert.NotPanics(t, func() { pool.RemovePeer(types.NodeID("Superman")) })
assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })
// remove peer with biggest height
pool.RemovePeer(types.NodeID("10"))
pool.RemovePeer(p2p.NodeID("10"))
assert.EqualValues(t, 9, pool.MaxPeerHeight())
// remove all peers

View File

@@ -2,17 +2,14 @@ package v0
import (
"fmt"
"runtime/debug"
"sync"
"time"
bc "github.com/tendermint/tendermint/internal/blocksync"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmSync "github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
@@ -29,23 +26,24 @@ var (
// TODO: Remove once p2p refactor is complete.
// ref: https://github.com/tendermint/tendermint/issues/5670
ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
BlockSyncChannel: {
BlockchainChannel: {
MsgType: new(bcproto.Message),
Descriptor: &p2p.ChannelDescriptor{
ID: byte(BlockSyncChannel),
ID: byte(BlockchainChannel),
Priority: 5,
SendQueueCapacity: 1000,
RecvBufferCapacity: 1024,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
MaxSendBytes: 100,
MaxSendBytes: 100,
},
},
}
)
const (
// BlockSyncChannel is a channel for blocks and status updates
BlockSyncChannel = p2p.ChannelID(0x40)
// BlockchainChannel is a channel for blocks and status updates
BlockchainChannel = p2p.ChannelID(0x40)
trySyncIntervalMS = 10
@@ -60,21 +58,21 @@ const (
)
type consensusReactor interface {
// For when we switch from block sync reactor to the consensus
// For when we switch from blockchain reactor and fast sync to the consensus
// machine.
SwitchToConsensus(state sm.State, skipWAL bool)
}
type peerError struct {
err error
peerID types.NodeID
peerID p2p.NodeID
}
func (e peerError) Error() string {
return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
}
// Reactor handles long-term catchup syncing.
// BlockchainReactor handles long-term catchup syncing.
type Reactor struct {
service.BaseService
@@ -85,19 +83,11 @@ type Reactor struct {
store *store.BlockStore
pool *BlockPool
consReactor consensusReactor
blockSync *tmSync.AtomicBool
fastSync bool
blockSyncCh *p2p.Channel
// blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope
// messages that the reactor will consume in processBlockSyncCh and receiving messages
// from the peer updates channel and other goroutines. We do this instead of directly
// sending on blockSyncCh.Out to avoid race conditions in the case where other goroutines
// send Envelopes directly to the blockSyncCh.Out channel, since processBlockSyncCh
// may close the blockSyncCh.Out channel at the same time that other goroutines send to
// blockSyncCh.Out.
blockSyncOutBridgeCh chan p2p.Envelope
peerUpdates *p2p.PeerUpdates
closeCh chan struct{}
blockchainCh *p2p.Channel
peerUpdates *p2p.PeerUpdates
closeCh chan struct{}
requestsCh <-chan BlockRequest
errorsCh <-chan peerError
@@ -106,10 +96,6 @@ type Reactor struct {
// requestRoutine spawned goroutines when stopping the reactor and before
// stopping the p2p Channel(s).
poolWG sync.WaitGroup
metrics *cons.Metrics
syncStartTime time.Time
}
// NewReactor returns new reactor instance.
@@ -119,10 +105,9 @@ func NewReactor(
blockExec *sm.BlockExecutor,
store *store.BlockStore,
consReactor consensusReactor,
blockSyncCh *p2p.Channel,
blockchainCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
blockSync bool,
metrics *cons.Metrics,
fastSync bool,
) (*Reactor, error) {
if state.LastBlockHeight != store.Height() {
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
@@ -137,23 +122,20 @@ func NewReactor(
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.
r := &Reactor{
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
blockSync: tmSync.NewBool(blockSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockSyncCh: blockSyncCh,
blockSyncOutBridgeCh: make(chan p2p.Envelope),
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
metrics: metrics,
syncStartTime: time.Time{},
initialState: state,
blockExec: blockExec,
store: store,
pool: NewBlockPool(startHeight, requestsCh, errorsCh),
consReactor: consReactor,
fastSync: fastSync,
requestsCh: requestsCh,
errorsCh: errorsCh,
blockchainCh: blockchainCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
}
r.BaseService = *service.NewBaseService(logger, "BlockSync", r)
r.BaseService = *service.NewBaseService(logger, "Blockchain", r)
return r, nil
}
@@ -162,10 +144,10 @@ func NewReactor(
// messages on that p2p channel accordingly. The caller must be sure to execute
// OnStop to ensure the outbound p2p Channels are closed.
//
// If blockSync is enabled, we also start the pool and the pool processing
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
if r.blockSync.IsSet() {
if r.fastSync {
if err := r.pool.Start(); err != nil {
return err
}
@@ -174,7 +156,7 @@ func (r *Reactor) OnStart() error {
go r.poolRoutine(false)
}
go r.processBlockSyncCh()
go r.processBlockchainCh()
go r.processPeerUpdates()
return nil
@@ -183,7 +165,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
if r.blockSync.IsSet() {
if r.fastSync {
if err := r.pool.Stop(); err != nil {
r.Logger.Error("failed to stop pool", "err", err)
}
@@ -199,13 +181,13 @@ func (r *Reactor) OnStop() {
// Wait for all p2p Channels to be closed before returning. This ensures we
// can easily reason about synchronization of all p2p Channels and ensure no
// panics will occur.
<-r.blockSyncCh.Done()
<-r.blockchainCh.Done()
<-r.peerUpdates.Done()
}
// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) {
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2p.NodeID) {
block := r.store.LoadBlock(msg.Height)
if block != nil {
blockProto, err := block.ToProto()
@@ -214,7 +196,7 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID)
return
}
r.blockSyncCh.Out <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
To: peerID,
Message: &bcproto.BlockResponse{Block: blockProto},
}
@@ -223,16 +205,16 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID)
}
r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height)
r.blockSyncCh.Out <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
To: peerID,
Message: &bcproto.NoBlockResponse{Height: msg.Height},
}
}
// handleBlockSyncMessage handles envelopes sent from peers on the
// BlockSyncChannel. It returns an error only if the Envelope.Message is unknown
// handleBlockchainMessage handles envelopes sent from peers on the
// BlockchainChannel. It returns an error only if the Envelope.Message is unknown
// for this channel. This should never be called outside of handleMessage.
func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error {
func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error {
logger := r.Logger.With("peer", envelope.From)
switch msg := envelope.Message.(type) {
@@ -249,7 +231,7 @@ func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error {
r.pool.AddBlock(envelope.From, block, block.Size())
case *bcproto.StatusRequest:
r.blockSyncCh.Out <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
To: envelope.From,
Message: &bcproto.StatusResponse{
Height: r.store.Height(),
@@ -277,19 +259,15 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
defer func() {
if e := recover(); e != nil {
err = fmt.Errorf("panic in processing message: %v", e)
r.Logger.Error(
"recovering from processing message panic",
"err", err,
"stack", string(debug.Stack()),
)
r.Logger.Error("recovering from processing message panic", "err", err)
}
}()
r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From)
switch chID {
case BlockSyncChannel:
err = r.handleBlockSyncMessage(envelope)
case BlockchainChannel:
err = r.handleBlockchainMessage(envelope)
default:
err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope)
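handleMessage above converts a panic during message processing into an error via recover, with one variant also logging debug.Stack(). A small standalone sketch of that guard, using a plain string handler instead of the reactor's Envelope plumbing:

```go
package main

import (
	"errors"
	"fmt"
	"log"
	"runtime/debug"
)

// handle wraps a message handler so a panic becomes an error instead of
// crashing the goroutine, and the stack trace is preserved in the log.
func handle(msg string) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			log.Printf("recovering from processing message panic: err=%v stack=%s", err, debug.Stack())
		}
	}()

	switch msg {
	case "ok":
		return nil
	case "boom":
		panic("malformed payload") // simulated bad input
	default:
		return errors.New("unknown message")
	}
}

func main() {
	fmt.Println(handle("ok"))   // <nil>
	fmt.Println(handle("boom")) // panic converted into an error
}
```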
@@ -298,32 +276,28 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
return err
}
// processBlockSyncCh initiates a blocking process where we listen for and handle
// envelopes on the BlockSyncChannel and blockSyncOutBridgeCh. Any error encountered during
// message execution will result in a PeerError being sent on the BlockSyncChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel. Any error encountered during message
// execution will result in a PeerError being sent on the BlockchainChannel. When
// the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.
func (r *Reactor) processBlockSyncCh() {
defer r.blockSyncCh.Close()
func (r *Reactor) processBlockchainCh() {
defer r.blockchainCh.Close()
for {
select {
case envelope := <-r.blockSyncCh.In:
if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil {
r.Logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err)
r.blockSyncCh.Error <- p2p.PeerError{
case envelope := <-r.blockchainCh.In:
if err := r.handleMessage(r.blockchainCh.ID, envelope); err != nil {
r.Logger.Error("failed to process message", "ch_id", r.blockchainCh.ID, "envelope", envelope, "err", err)
r.blockchainCh.Error <- p2p.PeerError{
NodeID: envelope.From,
Err: err,
}
}
case envelope := <-r.blockSyncOutBridgeCh:
r.blockSyncCh.Out <- envelope
case <-r.closeCh:
r.Logger.Debug("stopped listening on block sync channel; closing...")
r.Logger.Debug("stopped listening on blockchain channel; closing...")
return
}
}
}
@@ -340,7 +314,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
switch peerUpdate.Status {
case p2p.PeerStatusUp:
// send a status update to the newly added peer
r.blockSyncOutBridgeCh <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
To: peerUpdate.NodeID,
Message: &bcproto.StatusResponse{
Base: r.store.Base(),
@@ -371,10 +345,10 @@ func (r *Reactor) processPeerUpdates() {
}
}
// SwitchToBlockSync is called by the state sync reactor when switching to fast
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToBlockSync(state sm.State) error {
r.blockSync.Set()
func (r *Reactor) SwitchToFastSync(state sm.State) error {
r.fastSync = true
r.initialState = state
r.pool.height = state.LastBlockHeight + 1
@@ -382,8 +356,6 @@ func (r *Reactor) SwitchToBlockSync(state sm.State) error {
return err
}
r.syncStartTime = time.Now()
r.poolWG.Add(1)
go r.poolRoutine(true)
@@ -406,13 +378,13 @@ func (r *Reactor) requestRoutine() {
return
case request := <-r.requestsCh:
r.blockSyncOutBridgeCh <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
To: request.PeerID,
Message: &bcproto.BlockRequest{Height: request.Height},
}
case pErr := <-r.errorsCh:
r.blockSyncCh.Error <- p2p.PeerError{
r.blockchainCh.Error <- p2p.PeerError{
NodeID: pErr.peerID,
Err: pErr.err,
}
@@ -423,7 +395,7 @@ func (r *Reactor) requestRoutine() {
go func() {
defer r.poolWG.Done()
r.blockSyncOutBridgeCh <- p2p.Envelope{
r.blockchainCh.Out <- p2p.Envelope{
Broadcast: true,
Message: &bcproto.StatusRequest{},
}
@@ -496,8 +468,6 @@ FOR_LOOP:
r.Logger.Error("failed to stop pool", "err", err)
}
r.blockSync.UnSet()
if r.consReactor != nil {
r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
}
@@ -554,14 +524,14 @@ FOR_LOOP:
// NOTE: We've already removed the peer's request, but we still need
// to clean up the rest.
peerID := r.pool.RedoRequest(first.Height)
r.blockSyncCh.Error <- p2p.PeerError{
r.blockchainCh.Error <- p2p.PeerError{
NodeID: peerID,
Err: err,
}
peerID2 := r.pool.RedoRequest(second.Height)
if peerID2 != peerID {
r.blockSyncCh.Error <- p2p.PeerError{
r.blockchainCh.Error <- p2p.PeerError{
NodeID: peerID2,
Err: err,
}
@@ -578,20 +548,18 @@ FOR_LOOP:
// TODO: Same thing for app - but we would need a way to get the hash
// without persisting the state.
state, err = r.blockExec.ApplyBlock(state, firstID, first)
state, _, err = r.blockExec.ApplyBlock(state, firstID, first)
if err != nil {
// TODO: This is bad, are we zombie?
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
r.metrics.RecordConsMetrics(first)
blocksSynced++
if blocksSynced%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.Logger.Info(
"block sync rate",
"fast sync rate",
"height", r.pool.height,
"max_peer_height", r.pool.MaxPeerHeight(),
"blocks/s", lastRate,
@@ -608,31 +576,3 @@ FOR_LOOP:
}
}
}
func (r *Reactor) GetMaxPeerBlockHeight() int64 {
return r.pool.MaxPeerHeight()
}
func (r *Reactor) GetTotalSyncedTime() time.Duration {
if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}
func (r *Reactor) GetRemainingSyncTime() time.Duration {
if !r.blockSync.IsSet() {
return time.Duration(0)
}
targetSyncs := r.pool.targetSyncBlocks()
currentSyncs := r.store.Height() - r.pool.startHeight + 1
lastSyncRate := r.pool.getLastSyncRate()
if currentSyncs < 0 || lastSyncRate < 0.001 {
return time.Duration(0)
}
remain := float64(targetSyncs-currentSyncs) / lastSyncRate
return time.Duration(int64(remain * float64(time.Second)))
}
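GetRemainingSyncTime above estimates the time left as (target blocks − synced blocks) divided by the smoothed rate, returning zero when the rate is effectively zero. A standalone sketch of that arithmetic with hypothetical numbers:

```go
package main

import (
	"fmt"
	"time"
)

// remainingSyncTime mirrors the estimate used above: blocks still to fetch
// divided by the smoothed blocks-per-second rate. It returns zero when the
// inputs would make the estimate meaningless.
func remainingSyncTime(targetSyncs, currentSyncs int64, lastSyncRate float64) time.Duration {
	if currentSyncs < 0 || lastSyncRate < 0.001 {
		return 0
	}
	remain := float64(targetSyncs-currentSyncs) / lastSyncRate
	return time.Duration(int64(remain * float64(time.Second)))
}

func main() {
	// Hypothetical: 10,000 blocks to sync in total, 4,000 done, ~80 blocks/s.
	fmt.Println(remainingSyncTime(10000, 4000, 80)) // 1m15s
}
```

With 6,000 blocks left at roughly 80 blocks/s, the estimate works out to 75 seconds.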


@@ -9,16 +9,13 @@ import (
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/p2ptest"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
@@ -27,16 +24,16 @@ import (
type reactorTestSuite struct {
network *p2ptest.Network
logger log.Logger
nodes []types.NodeID
nodes []p2p.NodeID
reactors map[types.NodeID]*Reactor
app map[types.NodeID]proxy.AppConns
reactors map[p2p.NodeID]*Reactor
app map[p2p.NodeID]proxy.AppConns
blockSyncChannels map[types.NodeID]*p2p.Channel
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
blockchainChannels map[p2p.NodeID]*p2p.Channel
peerChans map[p2p.NodeID]chan p2p.PeerUpdate
peerUpdates map[p2p.NodeID]*p2p.PeerUpdates
blockSync bool
fastSync bool
}
func setup(
@@ -53,19 +50,18 @@ func setup(
"must specify at least one block height (nodes)")
rts := &reactorTestSuite{
logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
nodes: make([]types.NodeID, 0, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
app: make(map[types.NodeID]proxy.AppConns, numNodes),
blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
blockSync: true,
logger: log.TestingLogger().With("module", "blockchain", "testCase", t.Name()),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
nodes: make([]p2p.NodeID, 0, numNodes),
reactors: make(map[p2p.NodeID]*Reactor, numNodes),
app: make(map[p2p.NodeID]proxy.AppConns, numNodes),
blockchainChannels: make(map[p2p.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[p2p.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[p2p.NodeID]*p2p.PeerUpdates, numNodes),
fastSync: true,
}
chDesc := p2p.ChannelDescriptor{ID: byte(BlockSyncChannel)}
rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf))
rts.blockchainChannels = rts.network.MakeChannelsNoCleanup(t, BlockchainChannel, new(bcproto.Message), int(chBuf))
i := 0
for nodeID := range rts.network.Nodes {
@@ -89,7 +85,7 @@ func setup(
}
func (rts *reactorTestSuite) addNode(t *testing.T,
nodeID types.NodeID,
nodeID p2p.NodeID,
genDoc *types.GenesisDoc,
privVal types.PrivValidator,
maxBlockHeight int64,
@@ -105,9 +101,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
stateStore := sm.NewStore(stateDB)
blockStore := store.NewBlockStore(blockDB)
state, err := sm.MakeGenesisState(genDoc)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
require.NoError(t, err)
require.NoError(t, stateStore.Save(state))
db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(
stateStore,
@@ -115,8 +113,8 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
rts.app[nodeID].Consensus(),
mock.Mempool{},
sm.EmptyEvidencePool{},
blockStore,
)
require.NoError(t, stateStore.Save(state))
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
@@ -125,11 +123,12 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
lastBlock := blockStore.LoadBlock(blockHeight - 1)
vote, err := factory.MakeVote(
privVal,
lastBlock.Header.ChainID, 0,
lastBlock.Header.Height, 0, 2,
vote, err := types.MakeVote(
lastBlock.Header.Height,
lastBlockMeta.BlockID,
state.Validators,
privVal,
lastBlock.Header.ChainID,
time.Now(),
)
require.NoError(t, err)
@@ -142,11 +141,11 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
)
}
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
thisBlock := makeBlock(blockHeight, state, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
require.NoError(t, err)
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
@@ -161,10 +160,9 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
blockExec,
blockStore,
nil,
rts.blockSyncChannels[nodeID],
rts.blockchainChannels[nodeID],
rts.peerUpdates[nodeID],
rts.blockSync,
cons.NopMetrics())
rts.fastSync)
require.NoError(t, err)
require.NoError(t, rts.reactors[nodeID].Start())
@@ -181,10 +179,10 @@ func (rts *reactorTestSuite) start(t *testing.T) {
}
func TestReactor_AbruptDisconnect(t *testing.T) {
config := cfg.ResetTestRoot("block_sync_reactor_test")
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
maxBlockHeight := int64(64)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -212,37 +210,14 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
Status: p2p.PeerStatusDown,
NodeID: rts.nodes[0],
}
rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0])
}
func TestReactor_SyncTime(t *testing.T) {
config := cfg.ResetTestRoot("block_sync_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
maxBlockHeight := int64(101)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
rts.start(t)
require.Eventually(
t,
func() bool {
return rts.reactors[rts.nodes[1]].GetRemainingSyncTime() > time.Nanosecond &&
rts.reactors[rts.nodes[1]].pool.getLastSyncRate() > 0.001
},
10*time.Second,
10*time.Millisecond,
"expected node to be partially synced",
)
require.NoError(t, rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0]))
}
func TestReactor_NoBlockResponse(t *testing.T) {
config := cfg.ResetTestRoot("block_sync_reactor_test")
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
maxBlockHeight := int64(65)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -286,11 +261,11 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
// See: https://github.com/tendermint/tendermint/issues/6005
t.SkipNow()
config := cfg.ResetTestRoot("block_sync_reactor_test")
config := cfg.ResetTestRoot("blockchain_reactor_test")
defer os.RemoveAll(config.RootDir)
maxBlockHeight := int64(48)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
genDoc, privVals := randGenesisDoc(config, 1, false, 30)
rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000)
@@ -324,11 +299,8 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
//
// XXX: This causes a potential race condition.
// See: https://github.com/tendermint/tendermint/issues/6005
otherGenDoc, otherPrivVals := factory.RandGenesisDoc(config, 1, false, 30)
newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{
MaxPeers: uint16(len(rts.nodes) + 1),
MaxConnected: uint16(len(rts.nodes) + 1),
})
otherGenDoc, otherPrivVals := randGenesisDoc(config, 1, false, 30)
newNode := rts.network.MakeNode(t)
rts.addNode(t, newNode.NodeID, otherGenDoc, otherPrivVals[0], maxBlockHeight)
// add a fake peer just so we do not wait for the consensus ticker to timeout


@@ -0,0 +1,50 @@
package v0
import (
"sort"
cfg "github.com/tendermint/tendermint/config"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
func randGenesisDoc(
config *cfg.Config,
numValidators int,
randPower bool,
minPower int64,
) (*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Power: val.VotingPower,
}
privValidators[i] = privVal
}
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
ChainID: config.ChainID(),
Validators: validators,
}, privValidators
}
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}


@@ -1,12 +1,14 @@
package behavior
import "github.com/tendermint/tendermint/types"
import (
"github.com/tendermint/tendermint/p2p"
)
// PeerBehavior is a struct describing a behavior a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behavior performed by the peer.
type PeerBehavior struct {
peerID types.NodeID
peerID p2p.NodeID
reason interface{}
}
@@ -15,7 +17,7 @@ type badMessage struct {
}
// BadMessage returns a badMessage PeerBehavior.
func BadMessage(peerID types.NodeID, explanation string) PeerBehavior {
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: badMessage{explanation}}
}
@@ -24,7 +26,7 @@ type messageOutOfOrder struct {
}
// MessageOutOfOrder returns a messageOutOfOrder PeerBehavior.
func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior {
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}}
}
@@ -33,7 +35,7 @@ type consensusVote struct {
}
// ConsensusVote returns a consensusVote PeerBehavior.
func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior {
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}}
}
@@ -42,6 +44,6 @@ type blockPart struct {
}
// BlockPart returns a blockPart PeerBehavior.
func BlockPart(peerID types.NodeID, explanation string) PeerBehavior {
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: blockPart{explanation}}
}


@@ -3,9 +3,8 @@ package behavior
import (
"errors"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
)
// Reporter provides an interface for reactors to report the behavior
@@ -52,14 +51,14 @@ func (spbr *SwitchReporter) Report(behavior PeerBehavior) error {
// behavior in manufactured scenarios.
type MockReporter struct {
mtx tmsync.RWMutex
pb map[types.NodeID][]PeerBehavior
pb map[p2p.NodeID][]PeerBehavior
}
// NewMockReporter returns a Reporter which records all reported
// behaviors in memory.
func NewMockReporter() *MockReporter {
return &MockReporter{
pb: map[types.NodeID][]PeerBehavior{},
pb: map[p2p.NodeID][]PeerBehavior{},
}
}
@@ -73,7 +72,7 @@ func (mpbr *MockReporter) Report(behavior PeerBehavior) error {
}
// GetBehaviors returns all behaviors reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior {
func (mpbr *MockReporter) GetBehaviors(peerID p2p.NodeID) []PeerBehavior {
mpbr.mtx.RLock()
defer mpbr.mtx.RUnlock()
if items, ok := mpbr.pb[peerID]; ok {
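MockReporter above is an in-memory recorder keyed by peer ID and guarded by a read-write mutex. A reduced sketch of the same shape in plain Go (string IDs and reasons instead of the behavior package's types, since that package is internal):

```go
package main

import (
	"fmt"
	"sync"
)

// peerBehavior mirrors the shape of the behavior package's PeerBehavior.
type peerBehavior struct {
	peerID string
	reason string
}

// mockReporter records behaviors in memory, keyed by peer ID.
type mockReporter struct {
	mtx sync.RWMutex
	pb  map[string][]peerBehavior
}

func newMockReporter() *mockReporter {
	return &mockReporter{pb: map[string][]peerBehavior{}}
}

func (m *mockReporter) Report(b peerBehavior) {
	m.mtx.Lock()
	defer m.mtx.Unlock()
	m.pb[b.peerID] = append(m.pb[b.peerID], b)
}

func (m *mockReporter) GetBehaviors(peerID string) []peerBehavior {
	m.mtx.RLock()
	defer m.mtx.RUnlock()
	// Return a copy so callers cannot mutate internal state.
	return append([]peerBehavior(nil), m.pb[peerID]...)
}

func main() {
	r := newMockReporter()
	r.Report(peerBehavior{peerID: "P1", reason: "bad message"})
	r.Report(peerBehavior{peerID: "P1", reason: "block part"})
	fmt.Println(len(r.GetBehaviors("P1")), "behaviors recorded for P1") // 2
}
```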


@@ -4,14 +4,14 @@ import (
"sync"
"testing"
bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
"github.com/tendermint/tendermint/types"
bh "github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/p2p"
)
// TestMockReporter tests the MockReporter's ability to store reported
// peer behavior in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID types.NodeID = "MockPeer"
var peerID p2p.NodeID = "MockPeer"
pr := bh.NewMockReporter()
behaviors := pr.GetBehaviors(peerID)
@@ -34,7 +34,7 @@ func TestMockReporter(t *testing.T) {
}
type scriptItem struct {
peerID types.NodeID
peerID p2p.NodeID
behavior bh.PeerBehavior
}
@@ -76,10 +76,10 @@ func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool {
// frequencies with which those behaviors occur.
func TestEqualPeerBehaviors(t *testing.T) {
var (
peerID types.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
peerID p2p.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehavior
right []bh.PeerBehavior
}{
@@ -128,7 +128,7 @@ func TestEqualPeerBehaviors(t *testing.T) {
func TestMockPeerBehaviorReporterConcurrency(t *testing.T) {
var (
behaviorScript = []struct {
peerID types.NodeID
peerID p2p.NodeID
behaviors []bh.PeerBehavior
}{
{"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}},


@@ -4,8 +4,8 @@ import (
"errors"
"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -42,7 +42,7 @@ const (
)
type consensusReactor interface {
// for when we switch from blockchain reactor and block sync to
// for when we switch from blockchain reactor and fast sync to
// the consensus machine
SwitchToConsensus(state state.State, skipWAL bool)
}


@@ -3,6 +3,7 @@ package v2
import (
"fmt"
"github.com/tendermint/tendermint/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -12,8 +13,8 @@ import (
type pcBlockVerificationFailure struct {
priorityNormal
height int64
firstPeerID types.NodeID
secondPeerID types.NodeID
firstPeerID p2p.NodeID
secondPeerID p2p.NodeID
}
func (e pcBlockVerificationFailure) String() string {
@@ -25,7 +26,7 @@ func (e pcBlockVerificationFailure) String() string {
type pcBlockProcessed struct {
priorityNormal
height int64
peerID types.NodeID
peerID p2p.NodeID
}
func (e pcBlockProcessed) String() string {
@@ -45,7 +46,7 @@ func (p pcFinished) Error() string {
type queueItem struct {
block *types.Block
peerID types.NodeID
peerID p2p.NodeID
}
type blockQueue map[int64]queueItem
@@ -94,7 +95,7 @@ func (state *pcState) synced() bool {
return len(state.queue) <= 1
}
func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) {
func (state *pcState) enqueue(peerID p2p.NodeID, block *types.Block, height int64) {
if item, ok := state.queue[height]; ok {
panic(fmt.Sprintf(
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
@@ -109,7 +110,7 @@ func (state *pcState) height() int64 {
}
// purgePeer moves all unprocessed blocks from the queue
func (state *pcState) purgePeer(peerID types.NodeID) {
func (state *pcState) purgePeer(peerID p2p.NodeID) {
// what if height is less than state.height?
for height, item := range state.queue {
if item.peerID == peerID {
@@ -181,8 +182,6 @@ func (state *pcState) handle(event Event) (Event, error) {
panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err))
}
state.context.recordConsMetrics(first)
delete(state.queue, first.Height)
state.blocksSynced++


@@ -3,7 +3,6 @@ package v2
import (
"fmt"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -14,27 +13,24 @@ type processorContext interface {
saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
tmState() state.State
setState(state.State)
recordConsMetrics(block *types.Block)
}
type pContext struct {
store blockStore
applier blockApplier
state state.State
metrics *cons.Metrics
}
func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext {
func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContext {
return &pContext{
store: st,
applier: ex,
state: s,
metrics: m,
}
}
func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
newState, err := pc.applier.ApplyBlock(pc.state, blockID, block)
newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block)
pc.state = newState
return err
}
@@ -55,10 +51,6 @@ func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, see
pc.store.SaveBlock(block, blockParts, seenCommit)
}
func (pc *pContext) recordConsMetrics(block *types.Block) {
pc.metrics.RecordConsMetrics(block)
}
type mockPContext struct {
applicationBL []int64
verificationBL []int64
@@ -106,7 +98,3 @@ func (mpc *mockPContext) setState(state state.State) {
func (mpc *mockPContext) tmState() state.State {
return mpc.state
}
func (mpc *mockPContext) recordConsMetrics(block *types.Block) {
}


@@ -5,6 +5,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/tendermint/tendermint/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -39,7 +40,7 @@ func makeState(p *params) *pcState {
state := newPcState(context)
for _, item := range p.items {
state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height)
state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
}
state.blocksSynced = p.blocksSynced
@@ -47,7 +48,7 @@ func makeState(p *params) *pcState {
return state
}
func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived {
func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
return scBlockReceived{
peerID: peerID,
block: makePcBlock(height),


@@ -7,14 +7,12 @@ import (
proto "github.com/gogo/protobuf/proto"
bc "github.com/tendermint/tendermint/internal/blocksync"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
bc "github.com/tendermint/tendermint/blockchain"
"github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
tmsync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -31,12 +29,12 @@ type blockStore interface {
Height() int64
}
// BlockchainReactor handles block sync protocol.
// BlockchainReactor handles fast sync protocol.
type BlockchainReactor struct {
p2p.BaseReactor
blockSync *sync.AtomicBool // enable block sync on start when it's been Set
stateSynced bool // set to true when SwitchToBlockSync is called by state sync
fastSync bool // if true, enable fast sync on start
stateSynced bool // set to true when SwitchToFastSync is called by state sync
scheduler *Routine
processor *Routine
logger log.Logger
@@ -44,44 +42,37 @@ type BlockchainReactor struct {
mtx tmsync.RWMutex
maxPeerHeight int64
syncHeight int64
events chan Event // non-nil during a block sync
events chan Event // non-nil during a fast sync
reporter behavior.Reporter
io iIO
store blockStore
syncStartTime time.Time
syncStartHeight int64
lastSyncRate float64 // # blocks sync per sec base on the last 100 blocks
}
type blockApplier interface {
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error)
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error)
}
// XXX: unify naming in this package around tmState
func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor {
blockApplier blockApplier, fastSync bool) *BlockchainReactor {
initHeight := state.LastBlockHeight + 1
if initHeight == 1 {
initHeight = state.InitialHeight
}
scheduler := newScheduler(initHeight, time.Now())
pContext := newProcessorContext(store, blockApplier, state, metrics)
pContext := newProcessorContext(store, blockApplier, state)
// TODO: Fix naming to just newProcessor
// newPcState requires a processorContext
processor := newPcState(pContext)
return &BlockchainReactor{
scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
processor: newRoutine("processor", processor.handle, chBufferSize),
store: store,
reporter: reporter,
logger: log.NewNopLogger(),
blockSync: sync.NewBool(blockSync),
syncStartHeight: initHeight,
syncStartTime: time.Time{},
lastSyncRate: 0,
scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
processor: newRoutine("processor", processor.handle, chBufferSize),
store: store,
reporter: reporter,
logger: log.NewNopLogger(),
fastSync: fastSync,
}
}
@@ -90,10 +81,9 @@ func NewBlockchainReactor(
state state.State,
blockApplier blockApplier,
store blockStore,
blockSync bool,
metrics *cons.Metrics) *BlockchainReactor {
fastSync bool) *BlockchainReactor {
reporter := behavior.NewMockReporter()
return newReactor(state, store, reporter, blockApplier, blockSync, metrics)
return newReactor(state, store, reporter, blockApplier, fastSync)
}
// SetSwitch implements Reactor interface.
@@ -137,22 +127,22 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {
// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
if r.blockSync.IsSet() {
if r.fastSync {
err := r.startSync(nil)
if err != nil {
return fmt.Errorf("failed to start block sync: %w", err)
return fmt.Errorf("failed to start fast sync: %w", err)
}
}
return nil
}
// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil,
// startSync begins a fast sync, signaled by r.events being non-nil. If state is non-nil,
// the scheduler and processor are updated with this state on startup.
func (r *BlockchainReactor) startSync(state *state.State) error {
r.mtx.Lock()
defer r.mtx.Unlock()
if r.events != nil {
return errors.New("block sync already in progress")
return errors.New("fast sync already in progress")
}
r.events = make(chan Event, chBufferSize)
go r.scheduler.start()
@@ -167,7 +157,7 @@ func (r *BlockchainReactor) startSync(state *state.State) error {
return nil
}
// endSync ends a block sync
// endSync ends a fast sync
func (r *BlockchainReactor) endSync() {
r.mtx.Lock()
defer r.mtx.Unlock()
@@ -179,17 +169,11 @@ func (r *BlockchainReactor) endSync() {
r.processor.stop()
}
// SwitchToBlockSync is called by the state sync reactor when switching to block sync.
func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error {
// SwitchToFastSync is called by the state sync reactor when switching to fast sync.
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
r.stateSynced = true
state = state.Copy()
err := r.startSync(&state)
if err == nil {
r.syncStartTime = time.Now()
}
return err
return r.startSync(&state)
}
// reactor generated ticker events:
@@ -227,7 +211,7 @@ func (e rProcessBlock) String() string {
type bcBlockResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
size int64
block *types.Block
}
@@ -241,7 +225,7 @@ func (resp bcBlockResponse) String() string {
type bcNoBlockResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
height int64
}
@@ -254,7 +238,7 @@ func (resp bcNoBlockResponse) String() string {
type bcStatusResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2p.NodeID
base int64
height int64
}
@@ -267,7 +251,7 @@ func (resp bcStatusResponse) String() string {
// new peer is connected
type bcAddNewPeer struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
}
func (resp bcAddNewPeer) String() string {
@@ -277,7 +261,7 @@ func (resp bcAddNewPeer) String() string {
// existing peer is removed
type bcRemovePeer struct {
priorityHigh
peerID types.NodeID
peerID p2p.NodeID
reason interface{}
}
@@ -297,6 +281,7 @@ func (e bcResetState) String() string {
// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
var lastRate = 0.0
var lastHundred = time.Now()
var (
@@ -427,27 +412,21 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
switch event := event.(type) {
case pcBlockProcessed:
r.setSyncHeight(event.height)
if (r.syncHeight-r.syncStartHeight)%100 == 0 {
newSyncRate := 100 / time.Since(lastHundred).Seconds()
if r.lastSyncRate == 0 {
r.lastSyncRate = newSyncRate
} else {
r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
}
r.logger.Info("block sync Rate", "height", r.syncHeight,
"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
if r.syncHeight%100 == 0 {
lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
"max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
lastHundred = time.Now()
}
r.scheduler.send(event)
case pcBlockVerificationFailure:
r.scheduler.send(event)
case pcFinished:
r.logger.Info("block sync complete, switching to consensus")
r.logger.Info("Fast sync complete, switching to consensus")
if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) {
r.logger.Error("Failed to switch to consensus reactor")
}
r.endSync()
r.blockSync.UnSet()
return
case noOpEvent:
default:
@@ -604,40 +583,8 @@ func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor {
ID: BlockchainChannel,
Priority: 5,
SendQueueCapacity: 2000,
RecvBufferCapacity: 1024,
RecvBufferCapacity: 50 * 4096,
RecvMessageCapacity: bc.MaxMsgSize,
},
}
}
func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
r.mtx.RLock()
defer r.mtx.RUnlock()
return r.maxPeerHeight
}
func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
if !r.blockSync.IsSet() || r.syncStartTime.IsZero() {
return time.Duration(0)
}
return time.Since(r.syncStartTime)
}
func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
if !r.blockSync.IsSet() {
return time.Duration(0)
}
r.mtx.RLock()
defer r.mtx.RUnlock()
targetSyncs := r.maxPeerHeight - r.syncStartHeight
currentSyncs := r.syncHeight - r.syncStartHeight + 1
if currentSyncs < 0 || r.lastSyncRate < 0.001 {
return time.Duration(0)
}
remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate
return time.Duration(int64(remain * float64(time.Second)))
}


@@ -4,6 +4,7 @@ import (
"fmt"
"net"
"os"
"sort"
"sync"
"testing"
"time"
@@ -14,30 +15,28 @@ import (
dbm "github.com/tendermint/tm-db"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/blockchain/v2/internal/behavior"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/conn"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/mempool/mock"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/p2p/conn"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
tmstore "github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
type mockPeer struct {
service.Service
id types.NodeID
id p2p.NodeID
}
func (mp mockPeer) FlushStop() {}
func (mp mockPeer) ID() types.NodeID { return mp.id }
func (mp mockPeer) ID() p2p.NodeID { return mp.id }
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }
@@ -45,8 +44,8 @@ func (mp mockPeer) IsOutbound() bool { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error { return nil }
func (mp mockPeer) NodeInfo() types.NodeInfo {
return types.NodeInfo{
func (mp mockPeer) NodeInfo() p2p.NodeInfo {
return p2p.NodeInfo{
NodeID: "",
ListenAddr: "",
}
@@ -86,9 +85,9 @@ type mockBlockApplier struct {
// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
state sm.State, blockID types.BlockID, block *types.Block,
) (sm.State, error) {
) (sm.State, int64, error) {
state.LastBlockHeight++
return state, nil
return state, 0, nil
}
type mockSwitchIo struct {
@@ -153,8 +152,8 @@ type testReactorParams struct {
mockA bool
}
func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight)
func newTestReactor(p testReactorParams) *BlockchainReactor {
store, state, _ := newReactorStore(p.genDoc, p.privVals, p.startHeight)
reporter := behavior.NewMockReporter()
var appl blockApplier
@@ -166,17 +165,18 @@ func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor {
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
err := proxyApp.Start()
require.NoError(t, err)
if err != nil {
panic(fmt.Errorf("error start app: %w", err))
}
db := dbm.NewMemDB()
stateStore := sm.NewStore(db)
blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
appl = sm.NewBlockExecutor(
stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
err = stateStore.Save(state)
require.NoError(t, err)
appl = sm.NewBlockExecutor(stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}
}
r := newReactor(state, store, reporter, appl, true, cons.NopMetrics())
r := newReactor(state, store, reporter, appl, true)
logger := log.TestingLogger()
r.SetLogger(logger.With("module", "blockchain"))
@@ -366,7 +366,7 @@ func TestReactorHelperMode(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
params := testReactorParams{
logger: log.TestingLogger(),
@@ -401,7 +401,7 @@ func TestReactorHelperMode(t *testing.T) {
for _, tt := range tests {
tt := tt
t.Run(tt.name, func(t *testing.T) {
reactor := newTestReactor(t, params)
reactor := newTestReactor(params)
mockSwitch := &mockSwitchIo{switchedToConsensus: false}
reactor.io = mockSwitch
err := reactor.Start()
@@ -419,7 +419,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
case bcproto.BlockRequest:
if ev.Height > params.startHeight {
@@ -431,7 +431,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
} else {
old := mockSwitch.numBlockResponse
@@ -442,7 +442,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)
reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2p.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
}
}
@@ -456,9 +456,9 @@ func TestReactorHelperMode(t *testing.T) {
func TestReactorSetSwitchNil(t *testing.T) {
config := cfg.ResetTestRoot("blockchain_reactor_v2_test")
defer os.RemoveAll(config.RootDir)
genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30)
reactor := newTestReactor(t, testReactorParams{
reactor := newTestReactor(testReactorParams{
logger: log.TestingLogger(),
genDoc: genDoc,
privVals: privVals,
@@ -469,18 +469,55 @@ func TestReactorSetSwitchNil(t *testing.T) {
assert.Nil(t, reactor.io)
}
//----------------------------------------------
// utility funcs
func makeTxs(height int64) (txs []types.Tx) {
for i := 0; i < 10; i++ {
txs = append(txs, types.Tx([]byte{byte(height), byte(i)}))
}
return txs
}
func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block {
block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, state.Validators.GetProposer().Address)
return block
}
type testApp struct {
abci.BaseApplication
}
func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower int64) (
*types.GenesisDoc, []types.PrivValidator) {
validators := make([]types.GenesisValidator, numValidators)
privValidators := make([]types.PrivValidator, numValidators)
for i := 0; i < numValidators; i++ {
val, privVal := types.RandValidator(randPower, minPower)
validators[i] = types.GenesisValidator{
PubKey: val.PubKey,
Power: val.VotingPower,
}
privValidators[i] = privVal
}
sort.Sort(types.PrivValidatorsByAddress(privValidators))
return &types.GenesisDoc{
GenesisTime: tmtime.Now(),
ChainID: chainID,
Validators: validators,
}, privValidators
}
// Why are we importing the entire blockExecutor dependency graph here
// when we have the facilities to
func newReactorStore(
t *testing.T,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) {
t.Helper()
require.Len(t, privVals, 1)
maxBlockHeight int64) (*store.BlockStore, sm.State, *sm.BlockExecutor) {
if len(privVals) != 1 {
panic("only support one validator")
}
app := &testApp{}
cc := proxy.NewLocalClientCreator(app)
proxyApp := proxy.NewAppConns(cc)
@@ -490,15 +527,20 @@ func newReactorStore(
}
stateDB := dbm.NewMemDB()
blockStore := tmstore.NewBlockStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisState(genDoc)
require.NoError(t, err)
state, err := stateStore.LoadFromDBOrGenesisDoc(genDoc)
if err != nil {
panic(fmt.Errorf("error constructing state from genesis file: %w", err))
}
db := dbm.NewMemDB()
stateStore = sm.NewStore(db)
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(),
mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore)
err = stateStore.Save(state)
require.NoError(t, err)
mock.Mempool{}, sm.EmptyEvidencePool{})
if err = stateStore.Save(state); err != nil {
panic(err)
}
// add blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
@@ -506,25 +548,30 @@ func newReactorStore(
if blockHeight > 1 {
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
lastBlock := blockStore.LoadBlock(blockHeight - 1)
vote, err := factory.MakeVote(
privVals[0],
lastBlock.Header.ChainID, 0,
lastBlock.Header.Height, 0, 2,
vote, err := types.MakeVote(
lastBlock.Header.Height,
lastBlockMeta.BlockID,
state.Validators,
privVals[0],
lastBlock.Header.ChainID,
time.Now(),
)
require.NoError(t, err)
if err != nil {
panic(err)
}
lastCommit = types.NewCommit(vote.Height, vote.Round,
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
}
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
thisBlock := makeBlock(blockHeight, state, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
require.NoError(t, err)
state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock)
if err != nil {
panic(fmt.Errorf("error apply block: %w", err))
}
blockStore.SaveBlock(thisBlock, thisParts, lastCommit)
}


@@ -8,6 +8,7 @@ import (
"sort"
"time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/types"
)
@@ -25,7 +26,7 @@ func (e scFinishedEv) String() string {
// send a blockRequest message
type scBlockRequest struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
height int64
}
@@ -36,7 +37,7 @@ func (e scBlockRequest) String() string {
// a block has been received and validated by the scheduler
type scBlockReceived struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
block *types.Block
}
@@ -47,7 +48,7 @@ func (e scBlockReceived) String() string {
// scheduler detected a peer error
type scPeerError struct {
priorityHigh
peerID types.NodeID
peerID p2p.NodeID
reason error
}
@@ -58,7 +59,7 @@ func (e scPeerError) String() string {
// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
priorityHigh
peers []types.NodeID
peers []p2p.NodeID
}
func (e scPeersPruned) String() string {
@@ -125,7 +126,7 @@ func (e peerState) String() string {
}
type scPeer struct {
peerID types.NodeID
peerID p2p.NodeID
// initialized as New when peer is added, updated to Ready when statusUpdate is received,
// updated to Removed when peer is removed
@@ -142,7 +143,7 @@ func (p scPeer) String() string {
p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}
func newScPeer(peerID types.NodeID) *scPeer {
func newScPeer(peerID p2p.NodeID) *scPeer {
return &scPeer{
peerID: peerID,
state: peerStateNew,
@@ -163,14 +164,14 @@ type scheduler struct {
height int64
// lastAdvance tracks the last time a block execution happened.
// syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing.
// syncTimeout is the maximum time the scheduler waits to advance in the fast sync process before finishing.
// This covers the cases where there are no peers or all peers have a lower height.
lastAdvance time.Time
syncTimeout time.Duration
// a map of peerID to scheduler specific peer struct `scPeer` used to keep
// track of peer specific state
peers map[types.NodeID]*scPeer
peers map[p2p.NodeID]*scPeer
peerTimeout time.Duration // maximum response time from a peer otherwise prune
minRecvRate int64 // minimum receive rate from peer otherwise prune
@@ -182,13 +183,13 @@ type scheduler struct {
blockStates map[int64]blockState
// a map of heights to the peer we are waiting a response from
pendingBlocks map[int64]types.NodeID
pendingBlocks map[int64]p2p.NodeID
// the time at which a block was put in blockStatePending
pendingTime map[int64]time.Time
// a map of heights to the peers that put the block in blockStateReceived
receivedBlocks map[int64]types.NodeID
receivedBlocks map[int64]p2p.NodeID
}
func (sc scheduler) String() string {
@@ -203,10 +204,10 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
syncTimeout: 60 * time.Second,
height: initHeight,
blockStates: make(map[int64]blockState),
peers: make(map[types.NodeID]*scPeer),
pendingBlocks: make(map[int64]types.NodeID),
peers: make(map[p2p.NodeID]*scPeer),
pendingBlocks: make(map[int64]p2p.NodeID),
pendingTime: make(map[int64]time.Time),
receivedBlocks: make(map[int64]types.NodeID),
receivedBlocks: make(map[int64]p2p.NodeID),
targetPending: 10, // TODO - pass as param
peerTimeout: 15 * time.Second, // TODO - pass as param
minRecvRate: 0, // int64(7680), TODO - pass as param
@@ -215,14 +216,14 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
return &sc
}
func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer {
func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
if _, ok := sc.peers[peerID]; !ok {
sc.peers[peerID] = newScPeer(peerID)
}
return sc.peers[peerID]
}
func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
peer, ok := sc.peers[peerID]
if !ok {
return fmt.Errorf("couldn't find peer %s", peerID)
@@ -237,7 +238,7 @@ func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
return nil
}
func (sc *scheduler) removePeer(peerID types.NodeID) {
func (sc *scheduler) removePeer(peerID p2p.NodeID) {
peer, ok := sc.peers[peerID]
if !ok {
return
@@ -297,7 +298,7 @@ func (sc *scheduler) addNewBlocks() {
}
}
func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error {
func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
peer := sc.ensurePeer(peerID)
if peer.state == peerStateRemoved {
@@ -332,8 +333,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState {
}
}
func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
peers := make([]types.NodeID, 0)
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
peers := make([]p2p.NodeID, 0)
for _, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -345,8 +346,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
return peers
}
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID {
prunable := make([]types.NodeID, 0)
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
prunable := make([]p2p.NodeID, 0)
for peerID, peer := range sc.peers {
if peer.state != peerStateReady {
continue
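prunablePeers above collects peers that are either silent past the peer timeout or receiving below the minimum rate (the test cases further down exercise exactly those two conditions). A self-contained sketch of that filter:

```go
package main

import (
	"fmt"
	"time"
)

type peerStat struct {
	lastTouched time.Time // last time we heard from the peer
	lastRate    int64     // bytes/s measured on the last response
}

// prunable returns the IDs of peers that are either silent past the timeout
// or receiving data below the minimum rate, in no particular order.
func prunable(peers map[string]peerStat, timeout time.Duration, minRate int64, now time.Time) []string {
	out := make([]string, 0)
	for id, p := range peers {
		if now.Sub(p.lastTouched) > timeout || p.lastRate < minRate {
			out = append(out, id)
		}
	}
	return out
}

func main() {
	now := time.Now()
	peers := map[string]peerStat{
		"P1": {lastTouched: now, lastRate: 200},                       // healthy
		"P2": {lastTouched: now.Add(-2 * time.Second), lastRate: 200}, // idle too long
		"P3": {lastTouched: now, lastRate: 50},                        // too slow
	}
	fmt.Println(prunable(peers, time.Second, 100, now)) // P2 and P3, order may vary
}
```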
@@ -365,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}
// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error {
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
peer := sc.peers[peerID]
if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -389,7 +390,7 @@ func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64,
return nil
}
func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error {
func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
state := sc.getStateAtHeight(height)
if state != blockStateNew {
return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
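markPending and markReceived above walk a height through the scheduler's per-block state machine (New → Pending → Received) while recording which peer each request went to and who answered it. A compact sketch of that bookkeeping, independent of the real scheduler:

```go
package main

import (
	"fmt"
	"time"
)

type blockState int

const (
	blockStateNew blockState = iota
	blockStatePending
	blockStateReceived
)

type tracker struct {
	states        map[int64]blockState
	pendingBlocks map[int64]string    // height -> peer we asked
	pendingTime   map[int64]time.Time // when the request went out
	receivedFrom  map[int64]string    // height -> peer that delivered
}

func newTracker(heights ...int64) *tracker {
	t := &tracker{
		states:        map[int64]blockState{},
		pendingBlocks: map[int64]string{},
		pendingTime:   map[int64]time.Time{},
		receivedFrom:  map[int64]string{},
	}
	for _, h := range heights {
		t.states[h] = blockStateNew
	}
	return t
}

func (t *tracker) markPending(peer string, h int64, now time.Time) error {
	if t.states[h] != blockStateNew {
		return fmt.Errorf("block %d should be New", h)
	}
	t.states[h] = blockStatePending
	t.pendingBlocks[h] = peer
	t.pendingTime[h] = now
	return nil
}

func (t *tracker) markReceived(peer string, h int64) error {
	// Only the peer we asked may satisfy the request.
	if t.states[h] != blockStatePending || t.pendingBlocks[h] != peer {
		return fmt.Errorf("block %d not pending from %s", h, peer)
	}
	t.states[h] = blockStateReceived
	delete(t.pendingBlocks, h)
	delete(t.pendingTime, h)
	t.receivedFrom[h] = peer
	return nil
}

func main() {
	t := newTracker(7)
	fmt.Println(t.markPending("P1", 7, time.Now())) // <nil>
	fmt.Println(t.markReceived("P2", 7))            // error: wrong peer
	fmt.Println(t.markReceived("P1", 7))            // <nil>
}
```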
@@ -471,7 +472,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 {
return min
}
func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
var heights []int64
for height, pendingPeerID := range sc.pendingBlocks {
if pendingPeerID == peerID {
@@ -481,7 +482,7 @@ func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
return heights
}
func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
peers := sc.getPeersWithHeight(height)
if len(peers) == 0 {
return "", fmt.Errorf("cannot find peer for height %d", height)
@@ -489,7 +490,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
// create a map from number of pending requests to a list
// of peers having that number of pending requests.
pendingFrom := make(map[int][]types.NodeID)
pendingFrom := make(map[int][]p2p.NodeID)
for _, peerID := range peers {
numPending := len(sc.pendingFrom(peerID))
pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
@@ -508,7 +509,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
}
// PeerByID is a list of peers sorted by peerID.
type PeerByID []types.NodeID
type PeerByID []p2p.NodeID
func (peers PeerByID) Len() int {
return len(peers)
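selectPeer above groups candidate peers by how many requests are already pending with each and then chooses from the least-loaded group, with PeerByID providing a deterministic order. Below is a simplified version of that choice; the tie-break here is plain lexicographic, which is not necessarily what the full implementation does:

```go
package main

import (
	"fmt"
	"sort"
)

// selectPeer picks a peer with the fewest pending requests; ties are broken
// by sorting peer IDs so the choice is deterministic.
func selectPeer(peers []string, pending map[string]int) (string, error) {
	if len(peers) == 0 {
		return "", fmt.Errorf("no candidate peers")
	}
	// Group peers by their number of in-flight requests.
	byLoad := map[int][]string{}
	minLoad := -1
	for _, p := range peers {
		n := pending[p]
		byLoad[n] = append(byLoad[n], p)
		if minLoad == -1 || n < minLoad {
			minLoad = n
		}
	}
	best := byLoad[minLoad]
	sort.Strings(best)
	return best[0], nil
}

func main() {
	pending := map[string]int{"P1": 3, "P2": 1, "P3": 1}
	peer, _ := selectPeer([]string{"P1", "P2", "P3"}, pending)
	fmt.Println(peer) // P2
}
```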


@@ -10,6 +10,7 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)
@@ -19,9 +20,9 @@ type scTestParams struct {
initHeight int64
height int64
allB []int64
pending map[int64]types.NodeID
pending map[int64]p2p.NodeID
pendingTime map[int64]time.Time
received map[int64]types.NodeID
received map[int64]p2p.NodeID
peerTimeout time.Duration
minRecvRate int64
targetPending int
@@ -40,7 +41,7 @@ func verifyScheduler(sc *scheduler) {
}
func newTestScheduler(params scTestParams) *scheduler {
peers := make(map[types.NodeID]*scPeer)
peers := make(map[p2p.NodeID]*scPeer)
var maxHeight int64
initHeight := params.initHeight
@@ -53,8 +54,8 @@ func newTestScheduler(params scTestParams) *scheduler {
}
for id, peer := range params.peers {
peer.peerID = types.NodeID(id)
peers[types.NodeID(id)] = peer
peer.peerID = p2p.NodeID(id)
peers[p2p.NodeID(id)] = peer
if maxHeight < peer.height {
maxHeight = peer.height
}
@@ -121,7 +122,7 @@ func TestScMaxHeights(t *testing.T) {
name: "one ready peer",
sc: scheduler{
height: 3,
peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
peers: map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
},
wantMax: 6,
},
@@ -129,7 +130,7 @@ func TestScMaxHeights(t *testing.T) {
name: "ready and removed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: 4, state: peerStateReady},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -139,7 +140,7 @@ func TestScMaxHeights(t *testing.T) {
name: "removed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: 4, state: peerStateRemoved},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -149,7 +150,7 @@ func TestScMaxHeights(t *testing.T) {
name: "new peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {base: -1, height: -1, state: peerStateNew},
"P2": {base: -1, height: -1, state: peerStateNew}},
},
@@ -159,7 +160,7 @@ func TestScMaxHeights(t *testing.T) {
name: "mixed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: -1, state: peerStateNew},
"P2": {height: 10, state: peerStateReady},
"P3": {height: 20, state: peerStateRemoved},
@@ -186,7 +187,7 @@ func TestScMaxHeights(t *testing.T) {
func TestScEnsurePeer(t *testing.T) {
type args struct {
peerID types.NodeID
peerID p2p.NodeID
}
tests := []struct {
name string
@@ -243,7 +244,7 @@ func TestScTouchPeer(t *testing.T) {
now := time.Now()
type args struct {
peerID types.NodeID
peerID p2p.NodeID
time time.Time
}
@@ -315,13 +316,13 @@ func TestScPrunablePeers(t *testing.T) {
name string
fields scTestParams
args args
wantResult []types.NodeID
wantResult []p2p.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "mixed peers",
@@ -340,7 +341,7 @@ func TestScPrunablePeers(t *testing.T) {
"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []types.NodeID{"P4", "P5", "P6"},
wantResult: []p2p.NodeID{"P4", "P5", "P6"},
},
}
@@ -360,7 +361,7 @@ func TestScPrunablePeers(t *testing.T) {
func TestScRemovePeer(t *testing.T) {
type args struct {
peerID types.NodeID
peerID p2p.NodeID
}
tests := []struct {
name string
@@ -423,13 +424,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
},
{
@@ -437,13 +438,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{1: "P1"},
received: map[int64]p2p.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
received: map[int64]types.NodeID{},
received: map[int64]p2p.NodeID{},
},
},
{
@@ -451,15 +452,15 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 3: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]types.NodeID{},
received: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
received: map[int64]p2p.NodeID{},
},
},
{
@@ -470,8 +471,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
@@ -480,8 +481,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]types.NodeID{3: "P2"},
received: map[int64]types.NodeID{4: "P2", 5: "P2"},
pending: map[int64]p2p.NodeID{3: "P2"},
received: map[int64]p2p.NodeID{4: "P2", 5: "P2"},
},
},
}
@@ -500,7 +501,7 @@ func TestScRemovePeer(t *testing.T) {
func TestScSetPeerRange(t *testing.T) {
type args struct {
peerID types.NodeID
peerID p2p.NodeID
base int64
height int64
}
@@ -621,25 +622,25 @@ func TestScGetPeersWithHeight(t *testing.T) {
name string
fields scTestParams
args args
wantResult []types.NodeID
wantResult []p2p.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{height: 10},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "only new peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
args: args{height: 10},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "only Removed peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
args: args{height: 2},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "one Ready shorter peer",
@@ -648,7 +649,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 5},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "one Ready equal peer",
@@ -657,7 +658,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer",
@@ -667,7 +668,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer at base",
@@ -677,7 +678,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer with higher base",
@@ -687,7 +688,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "multiple mixed peers",
@@ -702,7 +703,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{8, 9, 10, 11},
},
args: args{height: 8},
wantResult: []types.NodeID{"P2", "P5"},
wantResult: []p2p.NodeID{"P2", "P5"},
},
}
@@ -724,7 +725,7 @@ func TestScMarkPending(t *testing.T) {
now := time.Now()
type args struct {
peerID types.NodeID
peerID p2p.NodeID
height int64
tm time.Time
}
@@ -820,14 +821,14 @@ func TestScMarkPending(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
},
args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
},
},
@@ -850,7 +851,7 @@ func TestScMarkReceived(t *testing.T) {
now := time.Now()
type args struct {
peerID types.NodeID
peerID p2p.NodeID
height int64
size int64
tm time.Time
@@ -890,7 +891,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
@@ -899,7 +900,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
wantErr: true,
},
@@ -908,13 +909,13 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
wantErr: true,
},
@@ -923,14 +924,14 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
wantErr: true,
@@ -940,16 +941,16 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
received: map[int64]types.NodeID{2: "P1"},
received: map[int64]p2p.NodeID{2: "P1"},
},
},
}
@@ -990,7 +991,7 @@ func TestScMarkProcessed(t *testing.T) {
height: 2,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
targetPending: 1,
},
@@ -1008,15 +1009,15 @@ func TestScMarkProcessed(t *testing.T) {
height: 1,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
received: map[int64]types.NodeID{1: "P1"}},
received: map[int64]p2p.NodeID{1: "P1"}},
args: args{height: 1},
wantFields: scTestParams{
height: 2,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now}},
},
}
@@ -1100,7 +1101,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantResult: false,
@@ -1110,7 +1111,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantResult: false,
},
@@ -1121,7 +1122,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
peers: map[string]*scPeer{
"P1": {height: 4, state: peerStateReady}},
allB: []int64{4},
received: map[int64]types.NodeID{4: "P1"},
received: map[int64]p2p.NodeID{4: "P1"},
},
wantResult: true,
},
@@ -1130,7 +1131,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{2: now, 4: now},
},
wantResult: false,
@@ -1178,7 +1179,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantHeight: -1,
@@ -1189,7 +1190,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantHeight: -1,
},
@@ -1208,7 +1209,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
},
wantHeight: 1,
@@ -1238,7 +1239,7 @@ func TestScSelectPeer(t *testing.T) {
name string
fields scTestParams
args args
wantResult types.NodeID
wantResult p2p.NodeID
wantError bool
}{
{
@@ -1306,7 +1307,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P2": {height: 9, state: peerStateReady}},
allB: []int64{4, 5, 6, 7, 8, 9},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
4: "P1", 6: "P1",
5: "P2",
},
@@ -1322,7 +1323,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 15, state: peerStateReady},
"P3": {height: 15, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -1391,7 +1392,7 @@ func TestScHandleBlockResponse(t *testing.T) {
now := time.Now()
block6FromP1 := bcBlockResponse{
time: now.Add(time.Millisecond),
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
size: 100,
block: makeScBlock(6),
}
@@ -1432,7 +1433,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1443,7 +1444,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
},
args: args{event: block6FromP1},
@@ -1454,7 +1455,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1476,7 +1477,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
now := time.Now()
noBlock6FromP1 := bcNoBlockResponse{
time: now.Add(time.Millisecond),
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}
@@ -1512,14 +1513,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: noOpEvent{},
wantFields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
},
@@ -1528,7 +1529,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
@@ -1551,7 +1552,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
func TestScHandleBlockProcessed(t *testing.T) {
now := time.Now()
processed6FromP1 := pcBlockProcessed{
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}
@@ -1578,7 +1579,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: processed6FromP1},
@@ -1590,7 +1591,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: scFinishedEv{},
@@ -1601,8 +1602,8 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{6: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: noOpEvent{},
@@ -1645,7 +1646,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1657,7 +1658,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1669,7 +1670,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: scFinishedEv{},
@@ -1680,8 +1681,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 5,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{5, 6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: noOpEvent{},
@@ -1696,8 +1697,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
"P3": {height: 8, state: peerStateReady},
},
allB: []int64{5, 6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
wantEvent: noOpEvent{},
@@ -1716,7 +1717,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
func TestScHandleAddNewPeer(t *testing.T) {
addP1 := bcAddNewPeer{
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
}
type args struct {
event bcAddNewPeer
@@ -1827,7 +1828,7 @@ func TestScHandleTryPrunePeer(t *testing.T) {
allB: []int64{1, 2, 3, 4, 5, 6, 7},
peerTimeout: time.Second},
args: args{event: pruneEv},
wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}},
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}},
},
{
name: "mixed peers, finish after pruning",
@@ -1925,7 +1926,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 4, state: peerStateReady},
"P2": {height: 5, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P2",
},
@@ -1943,7 +1944,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P3": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -2105,7 +2106,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: tick[1]},
height: 1,
},
@@ -2117,7 +2118,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
height: 1,
},
@@ -2129,7 +2130,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
height: 1,
},
@@ -2141,9 +2142,9 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{2: "P1", 3: "P1"},
pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
received: map[int64]types.NodeID{1: "P1"},
received: map[int64]p2p.NodeID{1: "P1"},
height: 1,
},
},
@@ -2154,9 +2155,9 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{3: "P1"},
pending: map[int64]p2p.NodeID{3: "P1"},
pendingTime: map[int64]time.Time{3: tick[3]},
received: map[int64]types.NodeID{1: "P1", 2: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
height: 1,
},
},
@@ -2167,29 +2168,29 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
},
{ // processed block 1
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}},
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}},
wantEvent: noOpEvent{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{2, 3},
received: map[int64]types.NodeID{2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
height: 2,
},
},
{ // processed block 2
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}},
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}},
wantEvent: scFinishedEv{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{3},
received: map[int64]types.NodeID{3: "P1"},
received: map[int64]p2p.NodeID{3: "P1"},
height: 3,
},
},
@@ -2205,7 +2206,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -2216,7 +2217,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{},
received: map[int64]p2p.NodeID{},
height: 1,
},
},


@@ -46,8 +46,9 @@ func main() {
rootCA = flag.String("rootcafile", "", "absolute path to root CA")
prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
With("module", "priv_val")
logger = log.NewTMLogger(
log.NewSyncWriter(os.Stdout),
).With("module", "priv_val")
)
flag.Parse()


@@ -1,6 +1,8 @@
package debug
import (
"os"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/libs/log"
@@ -15,7 +17,7 @@ var (
flagProfAddr = "pprof-laddr"
flagFrequency = "frequency"
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
)
// DebugCmd defines the root command containing subcommands that assist in


@@ -6,19 +6,21 @@ import (
"github.com/spf13/cobra"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/p2p"
)
// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
// NodeKey to the standard output.
var GenNodeKeyCmd = &cobra.Command{
Use: "gen-node-key",
Short: "Generate a new node key",
RunE: genNodeKey,
Use: "gen-node-key",
Aliases: []string{"gen_node_key"},
Short: "Generate a new node key",
RunE: genNodeKey,
PreRun: deprecateSnakeCase,
}
func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKey := types.GenNodeKey()
nodeKey := p2p.GenNodeKey()
bz, err := tmjson.Marshal(nodeKey)
if err != nil {


@@ -13,9 +13,11 @@ import (
// GenValidatorCmd allows the generation of a keypair for a
// validator.
var GenValidatorCmd = &cobra.Command{
Use: "gen-validator",
Short: "Generate new validator keypair",
RunE: genValidator,
Use: "gen-validator",
Aliases: []string{"gen_validator"},
Short: "Generate new validator keypair",
RunE: genValidator,
PreRun: deprecateSnakeCase,
}
func init() {


@@ -10,9 +10,10 @@ import (
cfg "github.com/tendermint/tendermint/config"
tmos "github.com/tendermint/tendermint/libs/os"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
// InitFilesCmd initializes a fresh Tendermint Core instance.
@@ -50,8 +51,8 @@ func initFilesWithConfig(config *cfg.Config) error {
if config.Mode == cfg.ModeValidator {
// private validator
privValKeyFile := config.PrivValidator.KeyFile()
privValStateFile := config.PrivValidator.StateFile()
privValKeyFile := config.PrivValidatorKeyFile()
privValStateFile := config.PrivValidatorStateFile()
if tmos.FileExists(privValKeyFile) {
pv, err = privval.LoadFilePV(privValKeyFile, privValStateFile)
if err != nil {
@@ -75,7 +76,7 @@ func initFilesWithConfig(config *cfg.Config) error {
if tmos.FileExists(nodeKeyFile) {
logger.Info("Found node key", "path", nodeKeyFile)
} else {
if _, err := types.LoadOrGenNodeKey(nodeKeyFile); err != nil {
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
return err
}
logger.Info("Generated node key", "path", nodeKeyFile)


@@ -1,87 +0,0 @@
package commands
import (
"context"
"os"
"os/signal"
"syscall"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/inspect"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer/sink"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
// InspectCmd is the command for starting an inspect server.
var InspectCmd = &cobra.Command{
Use: "inspect",
Short: "Run an inspect server for investigating Tendermint state",
Long: `
inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging
issues with Tendermint.
When the Tendermint consensus engine detects inconsistent state, it will crash the
tendermint process. Tendermint will not start up while in this inconsistent state.
The inspect command can be used to query the block and state store using Tendermint
RPC calls to debug issues of inconsistent state.
`,
RunE: runInspect,
}
func init() {
InspectCmd.Flags().
String("rpc.laddr",
config.RPC.ListenAddress, "RPC listener address. Port required")
InspectCmd.Flags().
String("db-backend",
config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb")
InspectCmd.Flags().
String("db-dir", config.DBPath, "database directory")
}
func runInspect(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
c := make(chan os.Signal, 1)
signal.Notify(c, syscall.SIGTERM, syscall.SIGINT)
go func() {
<-c
cancel()
}()
blockStoreDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "blockstore", Config: config})
if err != nil {
return err
}
blockStore := store.NewBlockStore(blockStoreDB)
stateDB, err := cfg.DefaultDBProvider(&cfg.DBContext{ID: "state", Config: config})
if err != nil {
if err := blockStoreDB.Close(); err != nil {
logger.Error("error closing block store db", "error", err)
}
return err
}
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return err
}
sinks, err := sink.EventSinksFromConfig(config, cfg.DefaultDBProvider, genDoc.ChainID)
if err != nil {
return err
}
stateStore := state.NewStore(stateDB)
ins := inspect.New(config.RPC, blockStore, stateStore, sinks, logger)
logger.Info("starting inspect server")
if err := ins.Run(ctx); err != nil {
return err
}
return nil
}


@@ -1,64 +0,0 @@
package commands
import (
"context"
"fmt"
"github.com/spf13/cobra"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/scripts/keymigrate"
)
func MakeKeyMigrateCommand() *cobra.Command {
cmd := &cobra.Command{
Use: "key-migrate",
Short: "Run Database key migration",
RunE: func(cmd *cobra.Command, args []string) error {
ctx, cancel := context.WithCancel(cmd.Context())
defer cancel()
contexts := []string{
// this is ordered to put the
// (presumably) biggest/most important
// subsets first.
"blockstore",
"state",
"peerstore",
"tx_index",
"evidence",
"light",
}
for idx, dbctx := range contexts {
logger.Info("beginning a key migration",
"dbctx", dbctx,
"num", idx+1,
"total", len(contexts),
)
db, err := cfg.DefaultDBProvider(&cfg.DBContext{
ID: dbctx,
Config: config,
})
if err != nil {
return fmt.Errorf("constructing database handle: %w", err)
}
if err = keymigrate.Migrate(ctx, db); err != nil {
return fmt.Errorf("running migration for context %q: %w",
dbctx, err)
}
}
logger.Info("completed database migration successfully")
return nil
},
}
// allow database info to be overridden via cli
addDBFlags(cmd)
return cmd
}


@@ -1,6 +1,7 @@
package commands
import (
"bufio"
"context"
"errors"
"fmt"
@@ -66,8 +67,7 @@ var (
trustedHash []byte
trustLevelStr string
logLevel string
logFormat string
verbose bool
primaryKey = []byte("primary")
witnessesKey = []byte("witnesses")
@@ -91,8 +91,7 @@ func init() {
"trusting period that headers can be verified within. Should be significantly less than the unbonding period")
LightCmd.Flags().Int64Var(&trustedHeight, "height", 1, "Trusted header's height")
LightCmd.Flags().BytesHexVar(&trustedHash, "hash", []byte{}, "Trusted header's hash")
LightCmd.Flags().StringVar(&logLevel, "log-level", log.LogLevelInfo, "The logging level (debug|info|warn|error|fatal)")
LightCmd.Flags().StringVar(&logFormat, "log-format", log.LogFormatPlain, "The logging format (text|json)")
LightCmd.Flags().BoolVar(&verbose, "verbose", false, "Verbose output")
LightCmd.Flags().StringVar(&trustLevelStr, "trust-level", "1/3",
"trust level. Must be between 1/3 and 3/3",
)
@@ -102,10 +101,15 @@ func init() {
}
func runProxy(cmd *cobra.Command, args []string) error {
logger, err := log.NewDefaultLogger(logFormat, logLevel, false)
if err != nil {
return err
// Initialize logger.
logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout))
var option log.Option
if verbose {
option, _ = log.AllowLevel("debug")
} else {
option, _ = log.AllowLevel("info")
}
logger = log.NewFilter(logger, option)
chainID = args[0]
logger.Info("Creating client...", "chainID", chainID)
@@ -144,7 +148,25 @@ func runProxy(cmd *cobra.Command, args []string) error {
return fmt.Errorf("can't parse trust level: %w", err)
}
options := []light.Option{light.Logger(logger)}
options := []light.Option{
light.Logger(logger),
light.ConfirmationFunction(func(action string) bool {
fmt.Println(action)
scanner := bufio.NewScanner(os.Stdin)
for {
scanner.Scan()
response := scanner.Text()
switch response {
case "y", "Y":
return true
case "n", "N":
return false
default:
fmt.Println("please input 'Y' or 'n' and press ENTER")
}
}
}),
}
if sequential {
options = append(options, light.SequentialVerification())
@@ -152,21 +174,31 @@ func runProxy(cmd *cobra.Command, args []string) error {
options = append(options, light.SkippingVerification(trustLevel))
}
// Initiate the light client. If the trusted store already has blocks in it, this
// will be used else we use the trusted options.
c, err := light.NewHTTPClient(
context.Background(),
chainID,
light.TrustOptions{
Period: trustingPeriod,
Height: trustedHeight,
Hash: trustedHash,
},
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
var c *light.Client
if trustedHeight > 0 && len(trustedHash) > 0 { // fresh installation
c, err = light.NewHTTPClient(
context.Background(),
chainID,
light.TrustOptions{
Period: trustingPeriod,
Height: trustedHeight,
Hash: trustedHash,
},
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
} else { // continue from latest state
c, err = light.NewHTTPClientFromTrustedStore(
chainID,
trustingPeriod,
primaryAddr,
witnessesAddrs,
dbs.New(db),
options...,
)
}
if err != nil {
return err
}


@@ -5,15 +5,17 @@ import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/internal/p2p/upnp"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/p2p/upnp"
)
// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
var ProbeUpnpCmd = &cobra.Command{
Use: "probe-upnp",
Short: "Test UPnP functionality",
RunE: probeUpnp,
Use: "probe-upnp",
Aliases: []string{"probe_upnp"},
Short: "Test UPnP functionality",
RunE: probeUpnp,
PreRun: deprecateSnakeCase,
}
func probeUpnp(cmd *cobra.Command, args []string) error {


@@ -1,251 +0,0 @@
package commands
import (
"errors"
"fmt"
"strings"
"github.com/spf13/cobra"
tmdb "github.com/tendermint/tm-db"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/progressbar"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)
const (
reindexFailed = "event re-index failed: "
)
// ReIndexEventCmd allows re-index the event by given block height interval
var ReIndexEventCmd = &cobra.Command{
Use: "reindex-event",
Short: "reindex events to the event store backends",
Long: `
reindex-event is an offline tool to re-index block and tx events to the event sinks.
You can run this command when the event store backend has been dropped/disconnected or when you want to replace the backend.
The default start-height is 0, meaning the tool will start re-indexing from the base block height (inclusive); the
default end-height is 0, meaning the tool will re-index up to the latest block height (inclusive). Users can omit
either or both arguments.
`,
Example: `
tendermint reindex-event
tendermint reindex-event --start-height 2
tendermint reindex-event --end-height 10
tendermint reindex-event --start-height 2 --end-height 10
`,
Run: func(cmd *cobra.Command, args []string) {
bs, ss, err := loadStateAndBlockStore(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err := checkValidHeight(bs); err != nil {
fmt.Println(reindexFailed, err)
return
}
es, err := loadEventSinks(config)
if err != nil {
fmt.Println(reindexFailed, err)
return
}
if err = eventReIndex(cmd, es, bs, ss); err != nil {
fmt.Println(reindexFailed, err)
return
}
fmt.Println("event re-index finished")
},
}
var (
startHeight int64
endHeight int64
)
func init() {
ReIndexEventCmd.Flags().Int64Var(&startHeight, "start-height", 0, "the block height would like to start for re-index")
ReIndexEventCmd.Flags().Int64Var(&endHeight, "end-height", 0, "the block height would like to finish for re-index")
}
func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) {
// Check duplicated sinks.
sinks := map[string]bool{}
for _, s := range cfg.TxIndex.Indexer {
sl := strings.ToLower(s)
if sinks[sl] {
return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
}
sinks[sl] = true
}
eventSinks := []indexer.EventSink{}
for k := range sinks {
switch k {
case string(indexer.NULL):
return nil, errors.New("found null event sink, please check the tx-index section in the config.toml")
case string(indexer.KV):
store, err := tmcfg.DefaultDBProvider(&tmcfg.DBContext{ID: "tx_index", Config: cfg})
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, kv.NewEventSink(store))
case string(indexer.PSQL):
conn := cfg.TxIndex.PsqlConn
if conn == "" {
return nil, errors.New("the psql connection settings cannot be empty")
}
es, err := psql.NewEventSink(conn, chainID)
if err != nil {
return nil, err
}
eventSinks = append(eventSinks, es)
default:
return nil, errors.New("unsupported event sink type")
}
}
if len(eventSinks) == 0 {
return nil, errors.New("no proper event sink can do event re-indexing," +
" please check the tx-index section in the config.toml")
}
if !indexer.IndexingEnabled(eventSinks) {
return nil, fmt.Errorf("no event sink has been enabled")
}
return eventSinks, nil
}
func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) {
dbType := tmdb.BackendType(cfg.DBBackend)
// Get BlockStore
blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
blockStore := store.NewBlockStore(blockStoreDB)
// Get StateStore
stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir())
if err != nil {
return nil, nil, err
}
stateStore := state.NewStore(stateDB)
return blockStore, stateStore, nil
}
func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStore, ss state.Store) error {
var bar progressbar.Bar
bar.NewOption(startHeight-1, endHeight)
fmt.Println("start re-indexing events:")
defer bar.Finish()
for i := startHeight; i <= endHeight; i++ {
select {
case <-cmd.Context().Done():
return fmt.Errorf("event re-index terminated at height %d: %w", i, cmd.Context().Err())
default:
b := bs.LoadBlock(i)
if b == nil {
return fmt.Errorf("not able to load block at height %d from the blockstore", i)
}
r, err := ss.LoadABCIResponses(i)
if err != nil {
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}
e := types.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
ResultEndBlock: *r.EndBlock,
}
var batch *indexer.Batch
if e.NumTxs > 0 {
batch = indexer.NewBatch(e.NumTxs)
for i, tx := range b.Data.Txs {
tr := abcitypes.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: tx,
Result: *(r.DeliverTxs[i]),
}
_ = batch.Add(&tr)
}
}
for _, sink := range es {
if err := sink.IndexBlockEvents(e); err != nil {
return fmt.Errorf("block event re-index at height %d failed: %w", i, err)
}
if batch != nil {
if err := sink.IndexTxEvents(batch.Ops); err != nil {
return fmt.Errorf("tx event re-index at height %d failed: %w", i, err)
}
}
}
}
bar.Play(i)
}
return nil
}
func checkValidHeight(bs state.BlockStore) error {
base := bs.Base()
if startHeight == 0 {
startHeight = base
fmt.Printf("set the start block height to the base height of the blockstore %d \n", base)
}
if startHeight < base {
return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base)
}
height := bs.Height()
if startHeight > height {
return fmt.Errorf(
"%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height)
}
if endHeight == 0 || endHeight > height {
endHeight = height
fmt.Printf("set the end block height to the latest height of the blockstore %d \n", height)
}
if endHeight < base {
return fmt.Errorf(
"%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base)
}
if endHeight < startHeight {
return fmt.Errorf(
"%s (requested the end height: %d is less than the start height: %d)",
ctypes.ErrInvalidRequest, startHeight, endHeight)
}
return nil
}
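The help text and checkValidHeight above both describe the same defaulting rule: a zero start-height falls back to the blockstore base, and a zero (or too-large) end-height falls back to the latest stored height. A small, hypothetical illustration of just that rule in isolation (clampHeights is not part of the command; it only restates the logic):

```go
package main

import "fmt"

// clampHeights applies the defaults described by reindex-event: a zero start
// means the blockstore base, a zero or out-of-range end means the latest height.
func clampHeights(start, end, base, latest int64) (int64, int64, error) {
	if start == 0 {
		start = base
	}
	if end == 0 || end > latest {
		end = latest
	}
	if start < base || start > latest || end < start {
		return 0, 0, fmt.Errorf("invalid range [%d, %d] for store [%d, %d]", start, end, base, latest)
	}
	return start, end, nil
}

func main() {
	start, end, err := clampHeights(0, 0, 2, 10)
	fmt.Println(start, end, err) // 2 10 <nil>
}
```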


@@ -1,171 +0,0 @@
package commands
import (
"context"
"errors"
"testing"
"github.com/spf13/cobra"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)
const (
height int64 = 10
base int64 = 2
)
func setupReIndexEventCmd() *cobra.Command {
reIndexEventCmd := &cobra.Command{
Use: ReIndexEventCmd.Use,
Run: func(cmd *cobra.Command, args []string) {},
}
_ = reIndexEventCmd.ExecuteContext(context.Background())
return reIndexEventCmd
}
func TestReIndexEventCheckHeight(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height)
testCases := []struct {
startHeight int64
endHeight int64
validHeight bool
}{
{0, 0, true},
{0, base, true},
{0, base - 1, false},
{0, height, true},
{0, height + 1, true},
{0, 0, true},
{base - 1, 0, false},
{base, 0, true},
{base, base, true},
{base, base - 1, false},
{base, height, true},
{base, height + 1, true},
{height, 0, true},
{height, base, false},
{height, height - 1, false},
{height, height, true},
{height, height + 1, true},
{height + 1, 0, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := checkValidHeight(mockBlockStore)
if tc.validHeight {
require.NoError(t, err)
} else {
require.Error(t, err)
}
}
}
func TestLoadEventSink(t *testing.T) {
testCases := []struct {
sinks []string
connURL string
loadErr bool
}{
{[]string{}, "", true},
{[]string{"NULL"}, "", true},
{[]string{"KV"}, "", false},
{[]string{"KV", "KV"}, "", true},
{[]string{"PSQL"}, "", true}, // true because empty connect url
{[]string{"PSQL"}, "wrongUrl", true}, // true because wrong connect url
// skip to test PSQL connect with correct url
{[]string{"UnsupportedSinkType"}, "wrongUrl", true},
}
for _, tc := range testCases {
cfg := tmcfg.TestConfig()
cfg.TxIndex.Indexer = tc.sinks
cfg.TxIndex.PsqlConn = tc.connURL
_, err := loadEventSinks(cfg)
if tc.loadErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}
func TestLoadBlockStore(t *testing.T) {
bs, ss, err := loadStateAndBlockStore(tmcfg.TestConfig())
require.NoError(t, err)
require.NotNil(t, bs)
require.NotNil(t, ss)
}
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockEventSink := &mocks.EventSink{}
mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
mockEventSink.
On("Type").Return(indexer.KV).
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
dtx := abcitypes.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
}
mockStateStore.
On("LoadABCIResponses", base).Return(nil, errors.New("")).Once().
On("LoadABCIResponses", base).Return(abciResp, nil).
On("LoadABCIResponses", height).Return(abciResp, nil)
testCases := []struct {
startHeight int64
endHeight int64
reIndexErr bool
}{
{base, height, true}, // LoadBlock error
{base, height, true}, // LoadABCIResponses error
{base, height, true}, // index block event error
{base, height, true}, // index tx event error
{base, base, false},
{height, height, false},
}
for _, tc := range testCases {
startHeight = tc.startHeight
endHeight = tc.endHeight
err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
if tc.reIndexErr {
require.Error(t, err)
} else {
require.NoError(t, err)
}
}
}


@@ -2,7 +2,8 @@ package commands
import (
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/consensus"
)
// ReplayCmd allows replaying of messages from the WAL.
@@ -17,9 +18,11 @@ var ReplayCmd = &cobra.Command{
// ReplayConsoleCmd allows replaying of messages from the WAL in a
// console.
var ReplayConsoleCmd = &cobra.Command{
Use: "replay-console",
Short: "Replay messages from WAL in a console",
Use: "replay-console",
Aliases: []string{"replay_console"},
Short: "Replay messages from WAL in a console",
Run: func(cmd *cobra.Command, args []string) {
consensus.RunReplayFile(config.BaseConfig, config.Consensus, true)
},
PreRun: deprecateSnakeCase,
}


@@ -14,9 +14,11 @@ import (
// ResetAllCmd removes the database of this Tendermint core
// instance.
var ResetAllCmd = &cobra.Command{
Use: "unsafe-reset-all",
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
RunE: resetAll,
Use: "unsafe-reset-all",
Aliases: []string{"unsafe_reset_all"},
Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state",
RunE: resetAll,
PreRun: deprecateSnakeCase,
}
var keepAddrBook bool
@@ -29,22 +31,24 @@ func init() {
// ResetPrivValidatorCmd resets the private validator files.
var ResetPrivValidatorCmd = &cobra.Command{
Use: "unsafe-reset-priv-validator",
Short: "(unsafe) Reset this node's validator to genesis state",
RunE: resetPrivValidator,
Use: "unsafe-reset-priv-validator",
Aliases: []string{"unsafe_reset_priv_validator"},
Short: "(unsafe) Reset this node's validator to genesis state",
RunE: resetPrivValidator,
PreRun: deprecateSnakeCase,
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetAll(cmd *cobra.Command, args []string) error {
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(),
config.PrivValidator.StateFile(), logger)
return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorKeyFile(),
config.PrivValidatorStateFile(), logger)
}
// XXX: this is totally unsafe.
// it's only suitable for testnets.
func resetPrivValidator(cmd *cobra.Command, args []string) error {
return resetFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger)
return resetFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile(), logger)
}
// ResetAll removes address book files plus all data, and resets the privValidator data.


@@ -2,18 +2,22 @@ package commands
import (
"fmt"
"os"
"strings"
"time"
"github.com/spf13/cobra"
"github.com/spf13/viper"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/cli"
tmflags "github.com/tendermint/tendermint/libs/cli/flags"
"github.com/tendermint/tendermint/libs/log"
)
var (
config = cfg.DefaultConfig()
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout))
ctxTimeout = 4 * time.Second
)
@@ -55,12 +59,27 @@ var RootCmd = &cobra.Command{
return err
}
logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false)
if config.LogFormat == cfg.LogFormatJSON {
logger = log.NewTMJSONLogger(log.NewSyncWriter(os.Stdout))
}
logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel)
if err != nil {
return err
}
if viper.GetBool(cli.TraceFlag) {
logger = log.NewTracingLogger(logger)
}
logger = logger.With("module", "main")
return nil
},
}
// deprecateSnakeCase is a util function for 0.34.1. Should be removed in 0.35
func deprecateSnakeCase(cmd *cobra.Command, args []string) {
if strings.Contains(cmd.CalledAs(), "_") {
fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release")
}
}


@@ -18,6 +18,10 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
)
var (
defaultRoot = os.ExpandEnv("$HOME/.some/test/dir")
)
// clearConfig clears env vars, the given root dir, and resets viper.
func clearConfig(dir string) {
if err := os.Unsetenv("TMHOME"); err != nil {
@@ -48,10 +52,10 @@ func testRootCmd() *cobra.Command {
}
func testSetup(rootDir string, args []string, env map[string]string) error {
clearConfig(rootDir)
clearConfig(defaultRoot)
rootCmd := testRootCmd()
cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir)
cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)
// run with the args and env
args = append([]string{rootCmd.Use}, args...)
@@ -59,7 +63,6 @@ func testSetup(rootDir string, args []string, env map[string]string) error {
}
func TestRootHome(t *testing.T) {
defaultRoot := t.TempDir()
newRoot := filepath.Join(defaultRoot, "something-else")
cases := []struct {
args []string
@@ -102,7 +105,6 @@ func TestRootFlagsEnv(t *testing.T) {
{nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env
}
defaultRoot := t.TempDir()
for i, tc := range cases {
idxString := strconv.Itoa(i)
@@ -116,7 +118,7 @@ func TestRootFlagsEnv(t *testing.T) {
func TestRootConfig(t *testing.T) {
// write non-default config
nonDefaultLogLvl := "debug"
nonDefaultLogLvl := "abc:debug"
cvals := map[string]string{
"log-level": nonDefaultLogLvl,
}
@@ -127,13 +129,12 @@ func TestRootConfig(t *testing.T) {
logLvl string
}{
{nil, nil, nonDefaultLogLvl}, // should load config
{[]string{"--log-level=info"}, nil, "info"}, // flag over rides
{nil, map[string]string{"TM_LOG_LEVEL": "info"}, "info"}, // env over rides
{nil, nil, nonDefaultLogLvl}, // should load config
{[]string{"--log-level=abc:info"}, nil, "abc:info"}, // flag over rides
{nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides
}
for i, tc := range cases {
defaultRoot := t.TempDir()
idxString := strconv.Itoa(i)
clearConfig(defaultRoot)


@@ -3,8 +3,6 @@ package commands
import (
"bytes"
"crypto/sha256"
"errors"
"flag"
"fmt"
"io"
"os"
@@ -13,6 +11,7 @@ import (
cfg "github.com/tendermint/tendermint/config"
tmos "github.com/tendermint/tendermint/libs/os"
nm "github.com/tendermint/tendermint/node"
)
var (
@@ -31,26 +30,11 @@ func AddNodeFlags(cmd *cobra.Command) {
// priv val flags
cmd.Flags().String(
"priv-validator-laddr",
config.PrivValidator.ListenAddr,
config.PrivValidatorListenAddr,
"socket address to listen on for connections from external priv-validator process")
// node flags
cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing")
// TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle
// This check was added to give users an upgrade prompt to use the new flag for syncing.
//
// The pflag package does not have a native way to print a deprecation warning
// and return an error. This logic was added to print a deprecation message to the user
// and then crash if the user attempts to use the old --fast-sync flag.
fs := flag.NewFlagSet("", flag.ExitOnError)
fs.Func("fast-sync", "deprecated",
func(string) error {
return errors.New("--fast-sync has been deprecated, please use --blocksync.enable")
})
cmd.Flags().AddGoFlagSet(fs)
cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck
cmd.Flags().Bool("fast-sync", config.FastSyncMode, "fast blockchain syncing")
cmd.Flags().BytesHexVar(
&genesisHash,
"genesis-hash",
@@ -65,7 +49,9 @@ func AddNodeFlags(cmd *cobra.Command) {
"proxy-app",
config.ProxyApp,
"proxy app address, or one of: 'kvstore',"+
" 'persistent_kvstore' or 'noop' for local testing.")
" 'persistent_kvstore',"+
" 'counter',"+
" 'counter_serial' or 'noop' for local testing.")
cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)")
// rpc flags
@@ -100,10 +86,7 @@ func AddNodeFlags(cmd *cobra.Command) {
config.Consensus.CreateEmptyBlocksInterval.String(),
"the possible interval between empty blocks")
addDBFlags(cmd)
}
func addDBFlags(cmd *cobra.Command) {
// db flags
cmd.Flags().String(
"db-backend",
config.DBBackend,
@@ -116,7 +99,7 @@ func addDBFlags(cmd *cobra.Command) {
// NewRunNodeCmd returns the command that allows the CLI to start a node.
// It can be used with a custom PrivValidator and in-process ABCI application.
func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
func NewRunNodeCmd(nodeProvider nm.Provider) *cobra.Command {
cmd := &cobra.Command{
Use: "start",
Aliases: []string{"node", "run"},
@@ -135,7 +118,7 @@ func NewRunNodeCmd(nodeProvider cfg.ServiceProvider) *cobra.Command {
return fmt.Errorf("failed to start node: %w", err)
}
logger.Info("started node", "node", n.String())
logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
@@ -175,7 +158,7 @@ func checkGenesisHash(config *cfg.Config) error {
// Compare with the flag.
if !bytes.Equal(genesisHash, actualHash) {
return fmt.Errorf(
"--genesis-hash=%X does not match %s hash: %X",
"--genesis_hash=%X does not match %s hash: %X",
genesisHash, config.GenesisFile(), actualHash)
}
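For context, the hunk above only shows the final comparison in checkGenesisHash. Below is a hedged, standalone sketch of the whole check, assuming (per the crypto/sha256 import earlier in this file) that the genesis file is hashed with SHA-256; verifyGenesisHash and its signature are illustrative, not the exact source.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"os"
)

// verifyGenesisHash compares the SHA-256 digest of the genesis file at path
// against an operator-supplied digest (e.g. from a --genesis-hash flag).
func verifyGenesisHash(path string, expected []byte) error {
	if len(expected) == 0 {
		return nil // no expected hash given, nothing to verify
	}
	data, err := os.ReadFile(path)
	if err != nil {
		return fmt.Errorf("can't read genesis file: %w", err)
	}
	actual := sha256.Sum256(data)
	if !bytes.Equal(expected, actual[:]) {
		return fmt.Errorf("--genesis-hash=%X does not match %s hash: %X",
			expected, path, actual[:])
	}
	return nil
}

func main() {
	if err := verifyGenesisHash("genesis.json", nil); err != nil {
		fmt.Println(err)
	}
}
```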


@@ -4,21 +4,25 @@ import (
"fmt"
"github.com/spf13/cobra"
"github.com/tendermint/tendermint/p2p"
)
// ShowNodeIDCmd dumps node's ID to the standard output.
var ShowNodeIDCmd = &cobra.Command{
Use: "show-node-id",
Short: "Show this node's ID",
RunE: showNodeID,
Use: "show-node-id",
Aliases: []string{"show_node_id"},
Short: "Show this node's ID",
RunE: showNodeID,
PreRun: deprecateSnakeCase,
}
func showNodeID(cmd *cobra.Command, args []string) error {
nodeKeyID, err := config.LoadNodeKeyID()
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return err
}
fmt.Println(nodeKeyID)
fmt.Println(nodeKey.ID)
return nil
}


@@ -16,9 +16,11 @@ import (
// ShowValidatorCmd adds capabilities for showing the validator info.
var ShowValidatorCmd = &cobra.Command{
Use: "show-validator",
Short: "Show this node's validator info",
RunE: showValidator,
Use: "show-validator",
Aliases: []string{"show_validator"},
Short: "Show this node's validator info",
RunE: showValidator,
PreRun: deprecateSnakeCase,
}
func showValidator(cmd *cobra.Command, args []string) error {
@@ -28,15 +30,10 @@ func showValidator(cmd *cobra.Command, args []string) error {
)
//TODO: remove once gRPC is the only supported protocol
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr)
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidatorListenAddr)
switch protocol {
case "grpc":
pvsc, err := tmgrpc.DialRemoteSigner(
config.PrivValidator,
config.ChainID(),
logger,
config.Instrumentation.Prometheus,
)
pvsc, err := tmgrpc.DialRemoteSigner(config, config.ChainID(), logger)
if err != nil {
return fmt.Errorf("can't connect to remote validator %w", err)
}
@@ -50,12 +47,12 @@ func showValidator(cmd *cobra.Command, args []string) error {
}
default:
keyFilePath := config.PrivValidator.KeyFile()
keyFilePath := config.PrivValidatorKeyFile()
if !tmos.FileExists(keyFilePath) {
return fmt.Errorf("private validator file %s does not exist", keyFilePath)
}
pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidator.StateFile())
pv, err := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile())
if err != nil {
return err
}

View File

@@ -14,9 +14,10 @@ import (
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
tmtime "github.com/tendermint/tendermint/types/time"
)
var (
@@ -143,8 +144,8 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
return err
}
pvKeyFile := filepath.Join(nodeDir, config.PrivValidator.Key)
pvStateFile := filepath.Join(nodeDir, config.PrivValidator.State)
pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey)
pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState)
pv, err := privval.LoadFilePV(pvKeyFile, pvStateFile)
if err != nil {
return err
@@ -273,11 +274,11 @@ func persistentPeersArray(config *cfg.Config) ([]string, error) {
for i := 0; i < nValidators+nNonValidators; i++ {
nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i))
config.SetRoot(nodeDir)
nodeKey, err := config.LoadNodeKeyID()
nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile())
if err != nil {
return []string{}, err
}
peers[i] = nodeKey.AddressString(fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
peers[i] = p2p.IDAddressString(nodeKey.ID, fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort))
}
return peers, nil
}

View File

@@ -13,6 +13,6 @@ var VersionCmd = &cobra.Command{
Use: "version",
Short: "Show version info",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(version.TMVersion)
fmt.Println(version.TMCoreSemVer)
},
}

View File

@@ -15,7 +15,6 @@ func main() {
rootCmd := cmd.RootCmd
rootCmd.AddCommand(
cmd.GenValidatorCmd,
cmd.ReIndexEventCmd,
cmd.InitFilesCmd,
cmd.ProbeUpnpCmd,
cmd.LightCmd,
@@ -28,8 +27,6 @@ func main() {
cmd.ShowNodeIDCmd,
cmd.GenNodeKeyCmd,
cmd.VersionCmd,
cmd.InspectCmd,
cmd.MakeKeyMigrateCommand(),
debug.DebugCmd,
cli.NewCompletionCmd(rootCmd, true),
)
@@ -41,8 +38,8 @@ func main() {
// * Supply a genesis doc file from another source
// * Provide their own DB implementation
// can copy this file and use something other than the
// node.NewDefault function
nodeFunc := nm.NewDefault
// DefaultNewNode function
nodeFunc := nm.DefaultNewNode
// Create & start node
rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc))

View File

@@ -4,16 +4,10 @@ import (
"encoding/hex"
"errors"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/types"
)
const (
@@ -22,6 +16,11 @@ const (
// FuzzModeDelay is a mode in which we randomly sleep
FuzzModeDelay
// LogFormatPlain is a format for colored text
LogFormatPlain = "plain"
// LogFormatJSON is a format for json output
LogFormatJSON = "json"
// DefaultLogLevel defines a default log level as INFO.
DefaultLogLevel = "info"
@@ -29,11 +28,8 @@ const (
ModeValidator = "validator"
ModeSeed = "seed"
BlockSyncV0 = "v0"
BlockSyncV2 = "v2"
MempoolV0 = "v0"
MempoolV1 = "v1"
BlockchainV0 = "v0"
BlockchainV2 = "v2"
)
// NOTE: Most of the structs & relevant comments + the
@@ -76,11 +72,10 @@ type Config struct {
P2P *P2PConfig `mapstructure:"p2p"`
Mempool *MempoolConfig `mapstructure:"mempool"`
StateSync *StateSyncConfig `mapstructure:"statesync"`
BlockSync *BlockSyncConfig `mapstructure:"blocksync"`
FastSync *FastSyncConfig `mapstructure:"fastsync"`
Consensus *ConsensusConfig `mapstructure:"consensus"`
TxIndex *TxIndexConfig `mapstructure:"tx-index"`
Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"`
PrivValidator *PrivValidatorConfig `mapstructure:"priv-validator"`
}
// DefaultConfig returns a default configuration for a Tendermint node
@@ -91,11 +86,10 @@ func DefaultConfig() *Config {
P2P: DefaultP2PConfig(),
Mempool: DefaultMempoolConfig(),
StateSync: DefaultStateSyncConfig(),
BlockSync: DefaultBlockSyncConfig(),
FastSync: DefaultFastSyncConfig(),
Consensus: DefaultConsensusConfig(),
TxIndex: DefaultTxIndexConfig(),
Instrumentation: DefaultInstrumentationConfig(),
PrivValidator: DefaultPrivValidatorConfig(),
}
}
@@ -114,11 +108,10 @@ func TestConfig() *Config {
P2P: TestP2PConfig(),
Mempool: TestMempoolConfig(),
StateSync: TestStateSyncConfig(),
BlockSync: TestBlockSyncConfig(),
FastSync: TestFastSyncConfig(),
Consensus: TestConsensusConfig(),
TxIndex: TestTxIndexConfig(),
Instrumentation: TestInstrumentationConfig(),
PrivValidator: DefaultPrivValidatorConfig(),
}
}
@@ -129,7 +122,6 @@ func (cfg *Config) SetRoot(root string) *Config {
cfg.P2P.RootDir = root
cfg.Mempool.RootDir = root
cfg.Consensus.RootDir = root
cfg.PrivValidator.RootDir = root
return cfg
}
@@ -151,8 +143,8 @@ func (cfg *Config) ValidateBasic() error {
if err := cfg.StateSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [statesync] section: %w", err)
}
if err := cfg.BlockSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [blocksync] section: %w", err)
if err := cfg.FastSync.ValidateBasic(); err != nil {
return fmt.Errorf("error in [fastsync] section: %w", err)
}
if err := cfg.Consensus.ValidateBasic(); err != nil {
return fmt.Errorf("error in [consensus] section: %w", err)
@@ -194,6 +186,11 @@ type BaseConfig struct { //nolint: maligned
// - No priv_validator_key.json, priv_validator_state.json
Mode string `mapstructure:"mode"`
// If this node is many blocks behind the tip of the chain, FastSync
// allows it to catch up quickly by downloading blocks in parallel
// and verifying their commits
FastSyncMode bool `mapstructure:"fast-sync"`
// Database backend: goleveldb | cleveldb | boltdb | rocksdb
// * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
// - pure go
@@ -227,6 +224,26 @@ type BaseConfig struct { //nolint: maligned
// Path to the JSON file containing the initial validator set and other meta data
Genesis string `mapstructure:"genesis-file"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
PrivValidatorKey string `mapstructure:"priv-validator-key-file"`
// Path to the JSON file containing the last sign state of a validator
PrivValidatorState string `mapstructure:"priv-validator-state-file"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
PrivValidatorListenAddr string `mapstructure:"priv-validator-laddr"`
// Client certificate generated while creating needed files for secure connection.
// If a remote validator address is provided but no certificate, the connection will be insecure
PrivValidatorClientCertificate string `mapstructure:"priv-validator-client-certificate-file"`
// Client key generated while creating certificates for secure connection
PrivValidatorClientKey string `mapstructure:"priv-validator-client-key-file"`
// Path to the Root Certificate Authority used to sign both client and server certificates
PrivValidatorRootCA string `mapstructure:"priv-validator-root-ca-file"`
// A JSON file containing the private key to use for p2p authenticated encryption
NodeKey string `mapstructure:"node-key-file"`
@@ -236,24 +253,25 @@ type BaseConfig struct { //nolint: maligned
// If true, query the ABCI app on connecting to a new peer
// so the app can decide if we should keep the connection or not
FilterPeers bool `mapstructure:"filter-peers"` // false
Other map[string]interface{} `mapstructure:",remain"`
}
// DefaultBaseConfig returns a default base configuration for a Tendermint node
func DefaultBaseConfig() BaseConfig {
return BaseConfig{
Genesis: defaultGenesisJSONPath,
NodeKey: defaultNodeKeyPath,
Mode: defaultMode,
Moniker: defaultMoniker,
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultLogLevel,
LogFormat: log.LogFormatPlain,
FilterPeers: false,
DBBackend: "goleveldb",
DBPath: "data",
Genesis: defaultGenesisJSONPath,
PrivValidatorKey: defaultPrivValKeyPath,
PrivValidatorState: defaultPrivValStatePath,
NodeKey: defaultNodeKeyPath,
Mode: defaultMode,
Moniker: defaultMoniker,
ProxyApp: "tcp://127.0.0.1:26658",
ABCI: "socket",
LogLevel: DefaultLogLevel,
LogFormat: LogFormatPlain,
FastSyncMode: true,
FilterPeers: false,
DBBackend: "goleveldb",
DBPath: "data",
}
}
@@ -263,6 +281,7 @@ func TestBaseConfig() BaseConfig {
cfg.chainID = "tendermint_test"
cfg.Mode = ModeValidator
cfg.ProxyApp = "kvstore"
cfg.FastSyncMode = false
cfg.DBBackend = "memdb"
return cfg
}
@@ -276,169 +295,72 @@ func (cfg BaseConfig) GenesisFile() string {
return rootify(cfg.Genesis, cfg.RootDir)
}
// PrivValidatorClientKeyFile returns the full path to the validator client key file
func (cfg BaseConfig) PrivValidatorClientKeyFile() string {
return rootify(cfg.PrivValidatorClientKey, cfg.RootDir)
}
// PrivValidatorClientCertificateFile returns the full path to the validator client certificate file
func (cfg BaseConfig) PrivValidatorClientCertificateFile() string {
return rootify(cfg.PrivValidatorClientCertificate, cfg.RootDir)
}
// PrivValidatorRootCAFile returns the full path to the root certificate authority file
func (cfg BaseConfig) PrivValidatorRootCAFile() string {
return rootify(cfg.PrivValidatorRootCA, cfg.RootDir)
}
// PrivValidatorKeyFile returns the full path to the priv_validator_key.json file
func (cfg BaseConfig) PrivValidatorKeyFile() string {
return rootify(cfg.PrivValidatorKey, cfg.RootDir)
}
// PrivValidatorStateFile returns the full path to the priv_validator_state.json file
func (cfg BaseConfig) PrivValidatorStateFile() string {
return rootify(cfg.PrivValidatorState, cfg.RootDir)
}
// NodeKeyFile returns the full path to the node_key.json file
func (cfg BaseConfig) NodeKeyFile() string {
return rootify(cfg.NodeKey, cfg.RootDir)
}
// LoadNodeKeyID loads the NodeKey from the configured node key file and returns its ID.
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) {
jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile())
if err != nil {
return "", err
}
nodeKey := types.NodeKey{}
err = tmjson.Unmarshal(jsonBytes, &nodeKey)
if err != nil {
return "", err
}
nodeKey.ID = types.NodeIDFromPubKey(nodeKey.PubKey())
return nodeKey.ID, nil
}
// LoadOrGenNodeKeyID attempts to load the NodeKey from the configured node key file
// and returns its ID. If the file does not exist, it generates and saves a new NodeKey.
func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) {
if tmos.FileExists(cfg.NodeKeyFile()) {
nodeKey, err := cfg.LoadNodeKeyID()
if err != nil {
return "", err
}
return nodeKey, nil
}
nodeKey := types.GenNodeKey()
if err := nodeKey.SaveAs(cfg.NodeKeyFile()); err != nil {
return "", err
}
return nodeKey.ID, nil
}
// DBDir returns the full path to the database directory
func (cfg BaseConfig) DBDir() string {
return rootify(cfg.DBPath, cfg.RootDir)
}
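The path accessors above all delegate to a rootify helper that is outside this hunk. A typical implementation (an assumption here, not the file's verbatim code) anchors relative paths under the configured root directory and leaves absolute paths untouched:

package main

import (
	"fmt"
	"path/filepath"
)

// rootify joins a relative path under root; absolute paths are returned as-is.
func rootify(path, root string) string {
	if filepath.IsAbs(path) {
		return path
	}
	return filepath.Join(root, path)
}

func main() {
	fmt.Println(rootify("data", "/foo"))      // /foo/data
	fmt.Println(rootify("/opt/data", "/foo")) // /opt/data (already absolute)
}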
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
switch cfg.LogFormat {
case log.LogFormatJSON, log.LogFormatText, log.LogFormatPlain:
default:
return errors.New("unknown log format (must be 'plain', 'text' or 'json')")
}
switch cfg.Mode {
case ModeFull, ModeValidator, ModeSeed:
case "":
return errors.New("no mode has been set")
default:
return fmt.Errorf("unknown mode: %v", cfg.Mode)
}
// TODO (https://github.com/tendermint/tendermint/issues/6908) remove this check after the v0.35 release cycle.
// This check was added to give users an upgrade prompt to use the new
// configuration option in v0.35. In future release cycles they should no longer
// be using this configuration parameter so the check can be removed.
// The cfg.Other field can likely be removed at the same time if it is not referenced
// elsewhere, as it was added only to support this check.
if fs, ok := cfg.Other["fastsync"]; ok {
if _, ok := fs.(map[string]interface{}); ok {
return fmt.Errorf("a configuration section named 'fastsync' was found in the " +
"configuration file. The 'fastsync' section has been renamed to " +
"'blocksync', please update the 'fastsync' field in your configuration file to 'blocksync'")
}
}
if fs, ok := cfg.Other["fast-sync"]; ok {
if fs != "" {
return fmt.Errorf("a parameter named 'fast-sync' was found in the " +
"configuration file. The parameter to enable or disable quickly syncing with a blockchain " +
"has moved to the [blocksync] section of the configuration file as blocksync.enable. " +
"Please move the 'fast-sync' field in your configuration file to 'blocksync.enable'")
}
}
return nil
}
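The two upgrade checks above work because BaseConfig declares Other with the `mapstructure:",remain"` tag, which makes the decoder collect every key it does not recognize. A standalone sketch of that behavior using github.com/mitchellh/mapstructure directly (editor's illustration; the struct and key names are made up):

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type demoConfig struct {
	Moniker string                 `mapstructure:"moniker"`
	Other   map[string]interface{} `mapstructure:",remain"`
}

func main() {
	raw := map[string]interface{}{
		"moniker":   "node0",
		"fast-sync": true, // stale key left over from an old config file
	}
	var cfg demoConfig
	if err := mapstructure.Decode(raw, &cfg); err != nil {
		panic(err)
	}
	if fs, ok := cfg.Other["fast-sync"]; ok {
		fmt.Printf("found deprecated key fast-sync=%v\n", fs)
	}
}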
//-----------------------------------------------------------------------------
// PrivValidatorConfig
// PrivValidatorConfig defines the configuration parameters for running a validator
type PrivValidatorConfig struct {
RootDir string `mapstructure:"home"`
// Path to the JSON file containing the private key to use as a validator in the consensus protocol
Key string `mapstructure:"key-file"`
// Path to the JSON file containing the last sign state of a validator
State string `mapstructure:"state-file"`
// TCP or UNIX socket address for Tendermint to listen on for
// connections from an external PrivValidator process
ListenAddr string `mapstructure:"laddr"`
// Client certificate generated while creating needed files for secure connection.
// If a remote validator address is provided but no certificate, the connection will be insecure
ClientCertificate string `mapstructure:"client-certificate-file"`
// Client key generated while creating certificates for secure connection
ClientKey string `mapstructure:"client-key-file"`
// Path to the Root Certificate Authority used to sign both client and server certificates
RootCA string `mapstructure:"root-ca-file"`
}
// DefaultPrivValidatorConfig returns a default private validator configuration
// for a Tendermint node.
func DefaultPrivValidatorConfig() *PrivValidatorConfig {
return &PrivValidatorConfig{
Key: defaultPrivValKeyPath,
State: defaultPrivValStatePath,
}
}
// ClientKeyFile returns the full path to the validator client key file
func (cfg *PrivValidatorConfig) ClientKeyFile() string {
return rootify(cfg.ClientKey, cfg.RootDir)
}
// ClientCertificateFile returns the full path to the validator client certificate file
func (cfg *PrivValidatorConfig) ClientCertificateFile() string {
return rootify(cfg.ClientCertificate, cfg.RootDir)
}
// RootCAFile returns the full path to the root certificate authority file
func (cfg *PrivValidatorConfig) RootCAFile() string {
return rootify(cfg.RootCA, cfg.RootDir)
}
// KeyFile returns the full path to the priv_validator_key.json file
func (cfg *PrivValidatorConfig) KeyFile() string {
return rootify(cfg.Key, cfg.RootDir)
}
// StateFile returns the full path to the priv_validator_state.json file
func (cfg *PrivValidatorConfig) StateFile() string {
return rootify(cfg.State, cfg.RootDir)
}
func (cfg *PrivValidatorConfig) AreSecurityOptionsPresent() bool {
func (cfg *BaseConfig) ArePrivValidatorClientSecurityOptionsPresent() bool {
switch {
case cfg.RootCA == "":
case cfg.PrivValidatorRootCA == "":
return false
case cfg.ClientKey == "":
case cfg.PrivValidatorClientKey == "":
return false
case cfg.ClientCertificate == "":
case cfg.PrivValidatorClientCertificate == "":
return false
default:
return true
}
}
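A sketch of how a predicate like AreSecurityOptionsPresent is typically consumed, for example to warn when a remote signer is configured without TLS material. The helper name and wording are illustrative, and it assumes the newer PrivValidatorConfig layout shown above:

package config

import "fmt"

// describeRemoteSignerSecurity is a hypothetical helper: if a remote
// priv-validator listen address is set but the TLS files are missing,
// the connection will fall back to plaintext.
func describeRemoteSignerSecurity(cfg *Config) string {
	pv := cfg.PrivValidator
	if pv.ListenAddr == "" {
		return "no remote priv-validator configured"
	}
	if pv.AreSecurityOptionsPresent() {
		return "remote priv-validator connection will use TLS"
	}
	return fmt.Sprintf("insecure remote priv-validator connection to %s", pv.ListenAddr)
}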
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg BaseConfig) ValidateBasic() error {
switch cfg.LogFormat {
case LogFormatPlain, LogFormatJSON:
default:
return errors.New("unknown log format (must be 'plain' or 'json')")
}
switch cfg.Mode {
case ModeFull, ModeValidator, ModeSeed:
case "":
return errors.New("no mode has been set")
default:
return fmt.Errorf("unknown mode: %v", cfg.Mode)
}
return nil
}
//-----------------------------------------------------------------------------
// RPCConfig
@@ -463,7 +385,6 @@ type RPCConfig struct {
// TCP or UNIX socket address for the gRPC server to listen on
// NOTE: This server only supports /broadcast_tx_commit
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCListenAddress string `mapstructure:"grpc-laddr"`
// Maximum number of simultaneous connections.
@@ -471,7 +392,6 @@ type RPCConfig struct {
// If you want to accept a larger number than the default, make sure
// you increase your OS limits.
// 0 - unlimited.
// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"`
// Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool
@@ -631,16 +551,8 @@ type P2PConfig struct { //nolint: maligned
// Comma separated list of seed nodes to connect to
// We only use these if we can't connect to peers in the addrbook
// NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
// TODO: Remove once p2p refactor is complete
// ref: https://github.com/tendermint/tendermint/issues/5670
Seeds string `mapstructure:"seeds"`
// Comma separated list of peers to be added to the peer store
// on startup. Either BootstrapPeers or PersistentPeers are
// needed for peer discovery
BootstrapPeers string `mapstructure:"bootstrap-peers"`
// Comma separated list of nodes to keep persistent connections to
PersistentPeers string `mapstructure:"persistent-peers"`
@@ -655,25 +567,11 @@ type P2PConfig struct { //nolint: maligned
AddrBookStrict bool `mapstructure:"addr-book-strict"`
// Maximum number of inbound peers
//
// TODO: Remove once p2p refactor is complete in favor of MaxConnections.
// ref: https://github.com/tendermint/tendermint/issues/5670
MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"`
// Maximum number of outbound peers to connect to, excluding persistent peers.
//
// TODO: Remove once p2p refactor is complete in favor of MaxConnections.
// ref: https://github.com/tendermint/tendermint/issues/5670
// Maximum number of outbound peers to connect to, excluding persistent peers
MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"`
// MaxConnections defines the maximum number of connected peers (inbound and
// outbound).
MaxConnections uint16 `mapstructure:"max-connections"`
// MaxIncomingConnectionAttempts rate limits the number of incoming connection
// attempts per IP address.
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"`
// List of node IDs, to which a connection will be (re)established ignoring any existing limits
UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"`
@@ -709,32 +607,20 @@ type P2PConfig struct { //nolint: maligned
// Testing params.
// Force dial to fail
TestDialFail bool `mapstructure:"test-dial-fail"`
// UseLegacy enables the "legacy" P2P implementation and
// disables the newer default implementation. This flag will
// be removed in a future release.
UseLegacy bool `mapstructure:"use-legacy"`
// Makes it possible to configure which queue backend the p2p
// layer uses. Options are: "fifo", "priority" and "wdrr",
// with the default being "priority".
QueueType string `mapstructure:"queue-type"`
}
// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
func DefaultP2PConfig() *P2PConfig {
return &P2PConfig{
ListenAddress: "tcp://0.0.0.0:26656",
ExternalAddress: "",
UPNP: false,
AddrBook: defaultAddrBookPath,
AddrBookStrict: true,
MaxNumInboundPeers: 40,
MaxNumOutboundPeers: 10,
MaxConnections: 64,
MaxIncomingConnectionAttempts: 100,
PersistentPeersMaxDialPeriod: 0 * time.Second,
FlushThrottleTimeout: 100 * time.Millisecond,
ListenAddress: "tcp://0.0.0.0:26656",
ExternalAddress: "",
UPNP: false,
AddrBook: defaultAddrBookPath,
AddrBookStrict: true,
MaxNumInboundPeers: 40,
MaxNumOutboundPeers: 10,
PersistentPeersMaxDialPeriod: 0 * time.Second,
FlushThrottleTimeout: 100 * time.Millisecond,
// The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes.
// The IP header and the TCP header take up 20 bytes each at least (unless
// optional header fields are used) and thus the max for (non-Jumbo frame)
@@ -748,8 +634,6 @@ func DefaultP2PConfig() *P2PConfig {
HandshakeTimeout: 20 * time.Second,
DialTimeout: 3 * time.Second,
TestDialFail: false,
QueueType: "priority",
UseLegacy: false,
}
}
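A short sketch of tuning the newer connection-oriented knobs declared above (editor's illustration; the helper name and the chosen numbers are arbitrary):

package config

// tunedP2PConfig shows how MaxConnections, MaxIncomingConnectionAttempts and
// QueueType might be adjusted on top of the defaults returned above.
func tunedP2PConfig() *P2PConfig {
	cfg := DefaultP2PConfig()
	cfg.MaxConnections = 128                // default 64
	cfg.MaxIncomingConnectionAttempts = 200 // default 100
	cfg.QueueType = "priority"              // other options: "fifo", "wdrr"
	return cfg
}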
@@ -797,69 +681,45 @@ func (cfg *P2PConfig) ValidateBasic() error {
//-----------------------------------------------------------------------------
// MempoolConfig
// MempoolConfig defines the configuration options for the Tendermint mempool.
// MempoolConfig defines the configuration options for the Tendermint mempool
type MempoolConfig struct {
Version string `mapstructure:"version"`
RootDir string `mapstructure:"home"`
Recheck bool `mapstructure:"recheck"`
Broadcast bool `mapstructure:"broadcast"`
WalPath string `mapstructure:"wal-dir"`
// Maximum number of transactions in the mempool
Size int `mapstructure:"size"`
// Limit the total size of all txs in the mempool.
// This only accounts for raw transactions (e.g. given 1MB transactions and
// max-txs-bytes=5MB, mempool will only accept 5 transactions).
MaxTxsBytes int64 `mapstructure:"max-txs-bytes"`
// Size of the cache (used to filter transactions we saw earlier) in transactions
CacheSize int `mapstructure:"cache-size"`
// Do not remove invalid transactions from the cache (default: false)
// Set to true if it's not possible for any invalid transaction to become
// valid again in the future.
KeepInvalidTxsInCache bool `mapstructure:"keep-invalid-txs-in-cache"`
// Maximum size of a single transaction
// NOTE: the max size of a tx transmitted over the network is {max-tx-bytes}.
MaxTxBytes int `mapstructure:"max-tx-bytes"`
// Maximum size of a batch of transactions to send to a peer
// Including space needed by encoding (one varint per transaction).
// XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
MaxBatchBytes int `mapstructure:"max-batch-bytes"`
// TTLDuration, if non-zero, defines the maximum amount of time a transaction
// can exist for in the mempool.
//
// Note, if TTLNumBlocks is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if its
// insertion time into the mempool is beyond TTLDuration.
TTLDuration time.Duration `mapstructure:"ttl-duration"`
// TTLNumBlocks, if non-zero, defines the maximum number of blocks a transaction
// can exist for in the mempool.
//
// Note, if TTLDuration is also defined, a transaction will be removed if it
// has existed in the mempool at least TTLNumBlocks number of blocks or if
// its insertion time into the mempool is beyond TTLDuration.
TTLNumBlocks int64 `mapstructure:"ttl-num-blocks"`
}
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool.
// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
func DefaultMempoolConfig() *MempoolConfig {
return &MempoolConfig{
Version: MempoolV1,
Recheck: true,
Broadcast: true,
WalPath: "",
// Each signature verification takes .5ms; Size is reduced until we implement
// ABCI Recheck
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
TTLDuration: 0 * time.Second,
TTLNumBlocks: 0,
Size: 5000,
MaxTxsBytes: 1024 * 1024 * 1024, // 1GB
CacheSize: 10000,
MaxTxBytes: 1024 * 1024, // 1MB
}
}
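The TTLDuration/TTLNumBlocks comments above describe an eviction rule; written out as a standalone predicate it looks roughly like the sketch below (editor's illustration; the function and parameter names are not the mempool's actual internals):

package main

import (
	"fmt"
	"time"
)

// shouldEvict reports whether a transaction inserted at insertTime/insertHeight
// has outlived either TTL. A zero TTL disables that particular check.
func shouldEvict(now time.Time, height int64, insertTime time.Time, insertHeight int64,
	ttlDuration time.Duration, ttlNumBlocks int64) bool {
	if ttlDuration > 0 && now.Sub(insertTime) > ttlDuration {
		return true
	}
	if ttlNumBlocks > 0 && height-insertHeight >= ttlNumBlocks {
		return true
	}
	return false
}

func main() {
	insert := time.Now().Add(-30 * time.Second)
	fmt.Println(shouldEvict(time.Now(), 105, insert, 100, time.Minute, 10)) // false: neither TTL exceeded
	fmt.Println(shouldEvict(time.Now(), 111, insert, 100, time.Minute, 10)) // true: 11 blocks >= ttl-num-blocks
}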
@@ -870,6 +730,16 @@ func TestMempoolConfig() *MempoolConfig {
return cfg
}
// WalDir returns the full path to the mempool's write-ahead log
func (cfg *MempoolConfig) WalDir() string {
return rootify(cfg.WalPath, cfg.RootDir)
}
// WalEnabled returns true if the WAL is enabled.
func (cfg *MempoolConfig) WalEnabled() bool {
return cfg.WalPath != ""
}
// ValidateBasic performs basic validation (checking param bounds, etc.) and
// returns an error if any check fails.
func (cfg *MempoolConfig) ValidateBasic() error {
@@ -885,13 +755,6 @@ func (cfg *MempoolConfig) ValidateBasic() error {
if cfg.MaxTxBytes < 0 {
return errors.New("max-tx-bytes can't be negative")
}
if cfg.TTLDuration < 0 {
return errors.New("ttl-duration can't be negative")
}
if cfg.TTLNumBlocks < 0 {
return errors.New("ttl-num-blocks can't be negative")
}
return nil
}
@@ -900,46 +763,13 @@ func (cfg *MempoolConfig) ValidateBasic() error {
// StateSyncConfig defines the configuration for the Tendermint state sync service
type StateSyncConfig struct {
// State sync rapidly bootstraps a new node by discovering, fetching, and restoring a
// state machine snapshot from peers instead of fetching and replaying historical
// blocks. Requires some peers in the network to take and serve state machine
// snapshots. State sync is not attempted if the node has any local state
// (LastBlockHeight > 0). The node will have a truncated block history, starting from
// the height of the snapshot.
Enable bool `mapstructure:"enable"`
// State sync uses light client verification to verify state. This can be done either
// through the P2P layer or the RPC layer. Set this to true to use the P2P layer. If
// false (default), the RPC layer will be used.
UseP2P bool `mapstructure:"use-p2p"`
// If using RPC, at least two addresses need to be provided. They should be compatible
// with net.Dial, for example: "host.example.com:2125".
RPCServers []string `mapstructure:"rpc-servers"`
// The hash and height of a trusted block. Must be within the trust-period.
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
// The trust period should be set so that Tendermint can detect and gossip
// misbehavior before it is considered expired. For chains based on the Cosmos SDK,
// one day less than the unbonding period should suffice.
TrustPeriod time.Duration `mapstructure:"trust-period"`
// Time to spend discovering snapshots before initiating a restore.
Enable bool `mapstructure:"enable"`
TempDir string `mapstructure:"temp-dir"`
RPCServers []string `mapstructure:"rpc-servers"`
TrustPeriod time.Duration `mapstructure:"trust-period"`
TrustHeight int64 `mapstructure:"trust-height"`
TrustHash string `mapstructure:"trust-hash"`
DiscoveryTime time.Duration `mapstructure:"discovery-time"`
// Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
// The synchronizer will create a new, randomly named directory within this directory
// and remove it when the sync is complete.
TempDir string `mapstructure:"temp-dir"`
// The timeout duration before re-requesting a chunk, possibly from a different
// peer (default: 15 seconds).
ChunkRequestTimeout time.Duration `mapstructure:"chunk-request-timeout"`
// The number of concurrent chunk and block fetchers to run (default: 4).
Fetchers int32 `mapstructure:"fetchers"`
}
func (cfg *StateSyncConfig) TrustHashBytes() []byte {
@@ -954,103 +784,76 @@ func (cfg *StateSyncConfig) TrustHashBytes() []byte {
// DefaultStateSyncConfig returns a default configuration for the state sync service
func DefaultStateSyncConfig() *StateSyncConfig {
return &StateSyncConfig{
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
ChunkRequestTimeout: 15 * time.Second,
Fetchers: 4,
TrustPeriod: 168 * time.Hour,
DiscoveryTime: 15 * time.Second,
}
}
// TestStateSyncConfig returns a default configuration for the state sync service
// TestFastSyncConfig returns a default configuration for the state sync service
func TestStateSyncConfig() *StateSyncConfig {
return DefaultStateSyncConfig()
}
// ValidateBasic performs basic validation.
func (cfg *StateSyncConfig) ValidateBasic() error {
if !cfg.Enable {
return nil
}
// If we're not using the P2P stack then we need to validate the
// RPCServers
if !cfg.UseP2P {
if len(cfg.RPCServers) < 2 {
return errors.New("at least two rpc-servers must be specified")
if cfg.Enable {
if len(cfg.RPCServers) == 0 {
return errors.New("rpc-servers is required")
}
if len(cfg.RPCServers) < 2 {
return errors.New("at least two rpc-servers entries is required")
}
for _, server := range cfg.RPCServers {
if server == "" {
if len(server) == 0 {
return errors.New("found empty rpc-servers entry")
}
}
if cfg.TrustPeriod <= 0 {
return errors.New("trusted-period is required")
}
if cfg.TrustHeight <= 0 {
return errors.New("trusted-height is required")
}
if len(cfg.TrustHash) == 0 {
return errors.New("trusted-hash is required")
}
_, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
return fmt.Errorf("invalid trusted-hash: %w", err)
}
}
if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second {
return errors.New("discovery time must be 0s or greater than five seconds")
}
if cfg.TrustPeriod <= 0 {
return errors.New("trusted-period is required")
}
if cfg.TrustHeight <= 0 {
return errors.New("trusted-height is required")
}
if len(cfg.TrustHash) == 0 {
return errors.New("trusted-hash is required")
}
_, err := hex.DecodeString(cfg.TrustHash)
if err != nil {
return fmt.Errorf("invalid trusted-hash: %w", err)
}
if cfg.ChunkRequestTimeout < 5*time.Second {
return errors.New("chunk-request-timeout must be at least 5 seconds")
}
if cfg.Fetchers <= 0 {
return errors.New("fetchers is required")
}
return nil
}
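A sketch of a test exercising the RPC-backed path of the newer validation above (editor's illustration; the test name is hypothetical, it assumes it lives in the config package, and the trust hash is a placeholder rather than a real block hash):

package config

import (
	"testing"
	"time"
)

func TestStateSyncRPCSketch(t *testing.T) {
	cfg := DefaultStateSyncConfig()
	cfg.Enable = true
	// With UseP2P left false, at least two RPC servers plus a trusted
	// height, hash and period are required.
	cfg.RPCServers = []string{"rpc1.example.com:26657", "rpc2.example.com:26657"}
	cfg.TrustHeight = 100
	cfg.TrustHash = "deadbeef"
	cfg.TrustPeriod = 168 * time.Hour

	if err := cfg.ValidateBasic(); err != nil {
		t.Fatalf("expected valid state sync config, got: %v", err)
	}
}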
//-----------------------------------------------------------------------------
// FastSyncConfig
// BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service
// If this node is many blocks behind the tip of the chain, BlockSync
// allows it to catch up quickly by downloading blocks in parallel
// and verifying their commits.
type BlockSyncConfig struct {
Enable bool `mapstructure:"enable"`
// FastSyncConfig defines the configuration for the Tendermint fast sync service
type FastSyncConfig struct {
Version string `mapstructure:"version"`
}
// DefaultBlockSyncConfig returns a default configuration for the block sync service
func DefaultBlockSyncConfig() *BlockSyncConfig {
return &BlockSyncConfig{
Enable: true,
Version: BlockSyncV0,
// DefaultFastSyncConfig returns a default configuration for the fast sync service
func DefaultFastSyncConfig() *FastSyncConfig {
return &FastSyncConfig{
Version: BlockchainV0,
}
}
// TestBlockSyncConfig returns a default configuration for the block sync.
func TestBlockSyncConfig() *BlockSyncConfig {
return DefaultBlockSyncConfig()
// TestFastSyncConfig returns a default configuration for the fast sync.
func TestFastSyncConfig() *FastSyncConfig {
return DefaultFastSyncConfig()
}
// ValidateBasic performs basic validation.
func (cfg *BlockSyncConfig) ValidateBasic() error {
func (cfg *FastSyncConfig) ValidateBasic() error {
switch cfg.Version {
case BlockSyncV0:
case BlockchainV0:
return nil
case BlockchainV2:
return nil
case BlockSyncV2:
return errors.New("blocksync version v2 is no longer supported. Please use v0")
default:
return fmt.Errorf("unknown blocksync version %s", cfg.Version)
return fmt.Errorf("unknown fastsync version %s", cfg.Version)
}
}
@@ -1229,25 +1032,19 @@ func (cfg *ConsensusConfig) ValidateBasic() error {
// TxIndexConfig defines the configuration for the transaction indexer,
// including composite keys to index.
type TxIndexConfig struct {
// The backend database list to back the indexer.
// If the list contains `null`, no indexer service will be used.
// What indexer to use for transactions
//
// Options:
// 1) "null" - no indexer services.
// 1) "null"
// 2) "kv" (default) - the simplest possible indexer,
// backed by key-value storage (defaults to levelDB; see DBBackend).
// 3) "psql" - the indexer services backed by PostgreSQL.
Indexer []string `mapstructure:"indexer"`
// The PostgreSQL connection configuration, the connection format:
// postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
PsqlConn string `mapstructure:"psql-conn"`
Indexer string `mapstructure:"indexer"`
}
// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
func DefaultTxIndexConfig() *TxIndexConfig {
return &TxIndexConfig{
Indexer: []string{"kv"},
Indexer: "kv",
}
}
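A short sketch of what the new list-valued indexer configuration looks like with both the kv and psql sinks enabled (editor's illustration; the helper is hypothetical and the connection string is a placeholder in the format documented above):

package config

// exampleTxIndexConfig enables the kv indexer alongside a PostgreSQL sink.
func exampleTxIndexConfig() *TxIndexConfig {
	cfg := DefaultTxIndexConfig()
	cfg.Indexer = []string{"kv", "psql"}
	cfg.PsqlConn = "postgresql://user:secret@localhost:5432/tendermint?sslmode=disable"
	return cfg
}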

View File

@@ -22,9 +22,12 @@ func TestDefaultConfig(t *testing.T) {
cfg.SetRoot("/foo")
cfg.Genesis = "bar"
cfg.DBPath = "/opt/data"
cfg.Mempool.WalPath = "wal/mem/"
assert.Equal("/foo/bar", cfg.GenesisFile())
assert.Equal("/opt/data", cfg.DBDir())
assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())
}
func TestConfigValidateBasic(t *testing.T) {
@@ -125,13 +128,13 @@ func TestStateSyncConfigValidateBasic(t *testing.T) {
require.NoError(t, cfg.ValidateBasic())
}
func TestBlockSyncConfigValidateBasic(t *testing.T) {
cfg := TestBlockSyncConfig()
func TestFastSyncConfigValidateBasic(t *testing.T) {
cfg := TestFastSyncConfig()
assert.NoError(t, cfg.ValidateBasic())
// tamper with version
cfg.Version = "v2"
assert.Error(t, cfg.ValidateBasic())
assert.NoError(t, cfg.ValidateBasic())
cfg.Version = "invalid"
assert.Error(t, cfg.ValidateBasic())

View File

@@ -1,26 +0,0 @@
package config
import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
db "github.com/tendermint/tm-db"
)
// ServiceProvider takes a config and a logger and returns a ready to go Node.
type ServiceProvider func(*Config, log.Logger) (service.Service, error)
// DBContext specifies config information for loading a new DB.
type DBContext struct {
ID string
Config *Config
}
// DBProvider takes a DBContext and returns an instantiated DB.
type DBProvider func(*DBContext) (db.DB, error)
// DefaultDBProvider returns a database using the DBBackend and DBDir
// specified in the Config.
func DefaultDBProvider(ctx *DBContext) (db.DB, error) {
dbType := db.BackendType(ctx.Config.DBBackend)
return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir())
}
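A sketch of how the DBProvider defined above is typically consumed: build a DBContext for a named store and let the provider pick the backend and directory from the Config (editor's illustration; the helper name and store ID are arbitrary):

package config

import (
	db "github.com/tendermint/tm-db"
)

// openBlockstoreDB opens the "blockstore" database through the given provider,
// e.g. DefaultDBProvider.
func openBlockstoreDB(cfg *Config, provider DBProvider) (db.DB, error) {
	return provider(&DBContext{ID: "blockstore", Config: cfg})
}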

View File

@@ -97,6 +97,11 @@ moniker = "{{ .BaseConfig.Moniker }}"
# - No priv_validator_key.json, priv_validator_state.json
mode = "{{ .BaseConfig.Mode }}"
# If this node is many blocks behind the tip of the chain, FastSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
fast-sync = {{ .BaseConfig.FastSyncMode }}
# Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb
# * goleveldb (github.com/syndtr/goleveldb - most popular implementation)
# - pure go
@@ -132,6 +137,27 @@ log-format = "{{ .BaseConfig.LogFormat }}"
# Path to the JSON file containing the initial validator set and other meta data
genesis-file = "{{ js .BaseConfig.Genesis }}"
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
priv-validator-key-file = "{{ js .BaseConfig.PrivValidatorKey }}"
# Path to the JSON file containing the last sign state of a validator
priv-validator-state-file = "{{ js .BaseConfig.PrivValidatorState }}"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process.
# When the address is prefixed with grpc instead of tcp, the gRPC client will be used.
priv-validator-laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"
# Client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
priv-validator-client-certificate-file = "{{ js .BaseConfig.PrivValidatorClientCertificate }}"
# Client key generated while creating certificates for secure connection
priv-validator-client-key-file = "{{ js .BaseConfig.PrivValidatorClientKey }}"
# Path to the Root Certificate Authority used to sign both client and server certificates
priv-validator-root-ca-file = "{{ js .BaseConfig.PrivValidatorRootCA }}"
# Path to the JSON file containing the private key to use for node authentication in the p2p protocol
node-key-file = "{{ js .BaseConfig.NodeKey }}"
@@ -143,33 +169,6 @@ abci = "{{ .BaseConfig.ABCI }}"
filter-peers = {{ .BaseConfig.FilterPeers }}
#######################################################
### Priv Validator Configuration ###
#######################################################
[priv-validator]
# Path to the JSON file containing the private key to use as a validator in the consensus protocol
key-file = "{{ js .PrivValidator.Key }}"
# Path to the JSON file containing the last sign state of a validator
state-file = "{{ js .PrivValidator.State }}"
# TCP or UNIX socket address for Tendermint to listen on for
# connections from an external PrivValidator process.
# When the address is prefixed with grpc instead of tcp, the gRPC client will be used.
laddr = "{{ .PrivValidator.ListenAddr }}"
# Client certificate generated while creating needed files for secure connection.
# If a remote validator address is provided but no certificate, the connection will be insecure
client-certificate-file = "{{ js .PrivValidator.ClientCertificate }}"
# Client key generated while creating certificates for secure connection
client-key-file = "{{ js .PrivValidator.ClientKey }}"
# Path to the Root Certificate Authority used to sign both client and server certificates
root-ca-file = "{{ js .PrivValidator.RootCA }}"
#######################################################################
### Advanced Configuration Options ###
#######################################################################
@@ -195,7 +194,6 @@ cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}
# TCP or UNIX socket address for the gRPC server to listen on
# NOTE: This server only supports /broadcast_tx_commit
# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# Maximum number of simultaneous connections.
@@ -205,7 +203,6 @@ grpc-laddr = "{{ .RPC.GRPCListenAddress }}"
# 0 - unlimited.
# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files}
# 1024 - 40 - 10 - 50 = 924 = ~900
# Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36.
grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }}
# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool
@@ -265,34 +262,18 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}"
#######################################################
[p2p]
# Enable the legacy p2p layer.
use-legacy = {{ .P2P.UseLegacy }}
# Select the p2p internal queue
queue-type = "{{ .P2P.QueueType }}"
# Address to listen for incoming connections
laddr = "{{ .P2P.ListenAddress }}"
# Address to advertise to peers for them to dial
# If empty, will use the same port as the laddr,
# and will introspect on the listener or use UPnP
# to figure out the address. ip and port are required
# example: 159.89.10.97:26656
# to figure out the address.
external-address = "{{ .P2P.ExternalAddress }}"
# Comma separated list of seed nodes to connect to
# We only use these if we can't connect to peers in the addrbook
# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
# TODO: Remove once p2p refactor is complete
# ref: https://github.com/tendermint/tendermint/issues/5670
seeds = "{{ .P2P.Seeds }}"
# Comma separated list of peers to be added to the peer store
# on startup. Either BootstrapPeers or PersistentPeers are
# needed for peer discovery
bootstrap-peers = "{{ .P2P.BootstrapPeers }}"
# Comma separated list of nodes to keep persistent connections to
persistent-peers = "{{ .P2P.PersistentPeers }}"
@@ -300,7 +281,6 @@ persistent-peers = "{{ .P2P.PersistentPeers }}"
upnp = {{ .P2P.UPNP }}
# Path to address book
# TODO: Remove once p2p refactor is complete in favor of peer store.
addr-book-file = "{{ js .P2P.AddrBook }}"
# Set true for strict address routability rules
@@ -308,26 +288,12 @@ addr-book-file = "{{ js .P2P.AddrBook }}"
addr-book-strict = {{ .P2P.AddrBookStrict }}
# Maximum number of inbound peers
#
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }}
# Maximum number of outbound peers to connect to, excluding persistent peers
#
# TODO: Remove once p2p refactor is complete in favor of MaxConnections.
# ref: https://github.com/tendermint/tendermint/issues/5670
max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }}
# Maximum number of connections (inbound and outbound).
max-connections = {{ .P2P.MaxConnections }}
# Rate limits the number of incoming connection attempts per IP address.
max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }}
# List of node IDs, to which a connection will be (re)established ignoring any existing limits
# TODO: Remove once p2p refactor is complete.
# ref: https://github.com/tendermint/tendermint/issues/5670
unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}"
# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used)
@@ -349,7 +315,6 @@ recv-rate = {{ .P2P.RecvRate }}
pex = {{ .P2P.PexReactor }}
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055
private-peer-ids = "{{ .P2P.PrivatePeerIDs }}"
# Toggle to disable guard against peers connecting from the same ip.
@@ -364,13 +329,9 @@ dial-timeout = "{{ .P2P.DialTimeout }}"
#######################################################
[mempool]
# Mempool version to use:
# 1) "v0" - The legacy non-prioritized mempool reactor.
# 2) "v1" (default) - The prioritized mempool reactor.
version = "{{ .Mempool.Version }}"
recheck = {{ .Mempool.Recheck }}
broadcast = {{ .Mempool.Broadcast }}
wal-dir = "{{ js .Mempool.WalPath }}"
# Maximum number of transactions in the mempool
size = {{ .Mempool.Size }}
@@ -397,22 +358,6 @@ max-tx-bytes = {{ .Mempool.MaxTxBytes }}
# XXX: Unused due to https://github.com/tendermint/tendermint/issues/5796
max-batch-bytes = {{ .Mempool.MaxBatchBytes }}
# ttl-duration, if non-zero, defines the maximum amount of time a transaction
# can exist for in the mempool.
#
# Note, if ttl-num-blocks is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if its
# insertion time into the mempool is beyond ttl-duration.
ttl-duration = "{{ .Mempool.TTLDuration }}"
# ttl-num-blocks, if non-zero, defines the maximum number of blocks a transaction
# can exist for in the mempool.
#
# Note, if ttl-duration is also defined, a transaction will be removed if it
# has existed in the mempool at least ttl-num-blocks number of blocks or if
# its insertion time into the mempool is beyond ttl-duration.
ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
#######################################################
### State Sync Configuration Options ###
#######################################################
@@ -424,53 +369,33 @@ ttl-num-blocks = {{ .Mempool.TTLNumBlocks }}
# starting from the height of the snapshot.
enable = {{ .StateSync.Enable }}
# State sync uses light client verification to verify state. This can be done either through the
# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), RPC layer
# will be used.
use-p2p = {{ .StateSync.UseP2P }}
# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial,
# for example: "host.example.com:2125"
# RPC servers (comma-separated) for light client verification of the synced state machine and
# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding
# header hash obtained from a trusted source, and a period during which validators can be trusted.
#
# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2
# weeks) during which they can be financially punished (slashed) for misbehavior.
rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}"
# The hash and height of a trusted block. Must be within the trust-period.
trust-height = {{ .StateSync.TrustHeight }}
trust-hash = "{{ .StateSync.TrustHash }}"
# The trust period should be set so that Tendermint can detect and gossip misbehavior before
# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
# period should suffice.
trust-period = "{{ .StateSync.TrustPeriod }}"
# Time to spend discovering snapshots before initiating a restore.
discovery-time = "{{ .StateSync.DiscoveryTime }}"
# Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
# The synchronizer will create a new, randomly named directory within this directory
# and remove it when the sync is complete.
# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
# Will create a new, randomly named directory within, and remove it when done.
temp-dir = "{{ .StateSync.TempDir }}"
# The timeout duration before re-requesting a chunk, possibly from a different
# peer (default: 15 seconds).
chunk-request-timeout = "{{ .StateSync.ChunkRequestTimeout }}"
# The number of concurrent chunk and block fetchers to run (default: 4).
fetchers = "{{ .StateSync.Fetchers }}"
#######################################################
### Block Sync Configuration Connections ###
### Fast Sync Configuration Connections ###
#######################################################
[blocksync]
[fastsync]
# If this node is many blocks behind the tip of the chain, BlockSync
# allows it to catch up quickly by downloading blocks in parallel
# and verifying their commits
enable = {{ .BlockSync.Enable }}
# Block Sync version to use:
# 1) "v0" (default) - the standard Block Sync implementation
# 2) "v2" - DEPRECATED, please use v0
version = "{{ .BlockSync.Version }}"
# Fast Sync version to use:
# 1) "v0" (default) - the legacy fast sync implementation
# 2) "v2" - complete redesign of v0, optimized for testability & readability
version = "{{ .FastSync.Version }}"
#######################################################
### Consensus Configuration Options ###
@@ -518,8 +443,7 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
#######################################################
[tx-index]
# The backend database list to back the indexer.
# If the list contains null, no indexer service will be used.
# What indexer to use for transactions
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
@@ -528,12 +452,7 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
# 1) "null"
# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
# 3) "psql" - the indexer services backed by PostgreSQL.
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}]
# The PostgreSQL connection configuration, the connection format:
# postgresql://<user>:<password>@<host>:<port>/<db>?<opts>
psql-conn = "{{ .TxIndex.PsqlConn }}"
indexer = "{{ .TxIndex.Indexer }}"
#######################################################
### Instrumentation Configuration Options ###
@@ -578,10 +497,10 @@ func ResetTestRootWithChainID(testName string, chainID string) *Config {
panic(err)
}
conf := DefaultConfig()
genesisFilePath := filepath.Join(rootDir, conf.Genesis)
privKeyFilePath := filepath.Join(rootDir, conf.PrivValidator.Key)
privStateFilePath := filepath.Join(rootDir, conf.PrivValidator.State)
baseConfig := DefaultBaseConfig()
genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
privKeyFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorKey)
privStateFilePath := filepath.Join(rootDir, baseConfig.PrivValidatorState)
// Write default config file if missing.
writeDefaultConfigFileIfNone(rootDir)

View File

@@ -36,7 +36,9 @@ func TestEnsureRoot(t *testing.T) {
data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
require.Nil(err)
checkConfig(t, string(data))
if !checkConfig(string(data)) {
t.Fatalf("config file missing some information")
}
ensureFiles(t, tmpDir, "data")
}
@@ -55,23 +57,25 @@ func TestEnsureTestRoot(t *testing.T) {
data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
require.Nil(err)
checkConfig(t, string(data))
if !checkConfig(string(data)) {
t.Fatalf("config file missing some information")
}
// TODO: make sure the cfg returned and testconfig are the same!
baseConfig := DefaultBaseConfig()
pvConfig := DefaultPrivValidatorConfig()
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, pvConfig.Key, pvConfig.State)
ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidatorKey, baseConfig.PrivValidatorState)
}
func checkConfig(t *testing.T, configFile string) {
t.Helper()
func checkConfig(configFile string) bool {
var valid bool
// list of words we expect in the config
var elems = []string{
"moniker",
"seeds",
"proxy-app",
"blocksync",
"create-empty-blocks",
"fast_sync",
"create_empty_blocks",
"peer",
"timeout",
"broadcast",
@@ -84,7 +88,10 @@ func checkConfig(t *testing.T, configFile string) {
}
for _, e := range elems {
if !strings.Contains(configFile, e) {
t.Errorf("config file was expected to contain %s but did not", e)
valid = false
} else {
valid = true
}
}
return valid
}

Some files were not shown because too many files have changed in this diff Show More