diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 30218e098..b48354f01 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -7,5 +7,4 @@ # global owners are only requested if there isn't a more specific # codeowner specified below. For this reason, the global codeowners # are often repeated in package-level definitions. -* @alexanderbez @ebuchman @cmwaters @tessr @tychoish @williambanfield - +* @ebuchman @cmwaters @tychoish @williambanfield @creachadair diff --git a/.github/codecov.yml b/.github/codecov.yml index ca879ab64..57c4bb160 100644 --- a/.github/codecov.yml +++ b/.github/codecov.yml @@ -5,19 +5,14 @@ coverage: status: project: default: - threshold: 1% - patch: on + threshold: 20% + patch: off changes: off github_checks: annotations: false -comment: - layout: "diff, files" - behavior: default - require_changes: no - require_base: no - require_head: yes +comment: false ignore: - "docs" @@ -25,3 +20,6 @@ ignore: - "scripts" - "**/*.pb.go" - "libs/pubsub/query/query.peg.go" + - "*.md" + - "*.rst" + - "*.yml" diff --git a/.github/mergify.yml b/.github/mergify.yml index df570504a..d49264597 100644 --- a/.github/mergify.yml +++ b/.github/mergify.yml @@ -6,7 +6,7 @@ pull_request_rules: actions: merge: method: squash - strict: true + strict: smart+fasttrack commit_message: title+body - name: backport patches to v0.34.x branch conditions: @@ -16,3 +16,12 @@ pull_request_rules: backport: branches: - v0.34.x + - name: backport patches to v0.35.x branch + conditions: + - base=master + - label=S:backport-to-v0.35.x + actions: + backport: + branches: + - v0.35.x + diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 7d7ab8113..4a3b89074 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -2,6 +2,8 @@ name: Test Coverage on: pull_request: push: + paths: + - "**.go" branches: - master - release/** @@ -44,12 +46,13 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go + "!test/" go.mod go.sum - name: install @@ -66,12 +69,13 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go + "!test/" go.mod go.sum - uses: actions/download-artifact@v2 @@ -81,10 +85,10 @@ jobs: - name: Set up Go uses: actions/setup-go@v2 with: - go-version: 1.16 + go-version: "1.17" - name: test & coverage report creation run: | - cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out -covermode=atomic + cat pkgs.txt.part.${{ matrix.part }} | xargs go test -mod=readonly -timeout 8m -race -coverprofile=${{ matrix.part }}profile.out if: env.GIT_DIFF - uses: actions/upload-artifact@v2 with: @@ -96,10 +100,11 @@ jobs: needs: tests steps: - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go + "!test/" go.mod go.sum - uses: actions/download-artifact@v2 @@ -119,9 +124,9 @@ jobs: name: "${{ github.sha }}-03-coverage" if: env.GIT_DIFF - run: | - cat ./*profile.out | grep -v "mode: atomic" >> coverage.txt + cat ./*profile.out | grep -v "mode: set" >> coverage.txt if: env.GIT_DIFF - - uses: codecov/codecov-action@v2.0.2 + - uses: 
codecov/codecov-action@v2.1.0 with: file: ./coverage.txt if: env.GIT_DIFF diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 009f16898..e773526fd 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -40,7 +40,7 @@ jobs: platforms: all - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1.5.0 + uses: docker/setup-buildx-action@v1.6.0 - name: Login to DockerHub if: ${{ github.event_name != 'pull_request' }} @@ -50,7 +50,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v2.6.1 + uses: docker/build-push-action@v2.7.0 with: context: . file: ./DOCKER/Dockerfile diff --git a/.github/workflows/e2e-nightly-34x.yml b/.github/workflows/e2e-nightly-34x.yml index 9c3b74cda..d43bff12f 100644 --- a/.github/workflows/e2e-nightly-34x.yml +++ b/.github/workflows/e2e-nightly-34x.yml @@ -17,13 +17,13 @@ jobs: strategy: fail-fast: false matrix: - group: ['00', '01', '02', '03'] + group: ['00', '01'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - uses: actions/checkout@v2.3.4 with: @@ -37,7 +37,7 @@ jobs: - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 4 -d networks/nightly + run: ./build/generator -g 2 -d networks/nightly - name: Run testnets in group ${{ matrix.group }} working-directory: test/e2e diff --git a/.github/workflows/e2e-nightly-35x.yml b/.github/workflows/e2e-nightly-35x.yml new file mode 100644 index 000000000..b3acdf62b --- /dev/null +++ b/.github/workflows/e2e-nightly-35x.yml @@ -0,0 +1,76 @@ +# Runs randomly generated E2E testnets nightly on v0.35.x. + +# !! If you change something in this file, you probably want +# to update the e2e-nightly-master workflow as well! + +name: e2e-nightly-35x +on: + workflow_dispatch: # allow running workflow manually + schedule: + - cron: '0 2 * * *' + +jobs: + e2e-nightly-test: + # Run parallel jobs for the listed testnet groups (must match the + # ./build/generator -g flag) + strategy: + fail-fast: false + matrix: + p2p: ['legacy', 'new', 'hybrid'] + group: ['00', '01', '02', '03'] + runs-on: ubuntu-latest + timeout-minutes: 60 + steps: + - uses: actions/setup-go@v2 + with: + go-version: '1.17' + + - uses: actions/checkout@v2.3.4 + with: + ref: 'v0.35.x' + + - name: Build + working-directory: test/e2e + # Run make jobs in parallel, since we can't run steps in parallel. 
+ run: make -j2 docker generator runner tests + + - name: Generate testnets + working-directory: test/e2e + # When changing -g, also change the matrix groups above + run: ./build/generator -g 4 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} + + - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }} + working-directory: test/e2e + run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml + + e2e-nightly-fail-2: + needs: e2e-nightly-test + if: ${{ failure() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on failure + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':skull:' + SLACK_COLOR: danger + SLACK_MESSAGE: Nightly E2E tests failed on v0.35.x + SLACK_FOOTER: '' + + e2e-nightly-success: # may turn this off once they seem to pass consistently + needs: e2e-nightly-test + if: ${{ success() }} + runs-on: ubuntu-latest + steps: + - name: Notify Slack on success + uses: rtCamp/action-slack-notify@12e36fc18b0689399306c2e0b3e0f2978b7f1ee7 + env: + SLACK_WEBHOOK: ${{ secrets.SLACK_WEBHOOK }} + SLACK_CHANNEL: tendermint-internal + SLACK_USERNAME: Nightly E2E Tests + SLACK_ICON_EMOJI: ':white_check_mark:' + SLACK_COLOR: good + SLACK_MESSAGE: Nightly E2E tests passed on v0.35.x + SLACK_FOOTER: '' diff --git a/.github/workflows/e2e-nightly-master.yml b/.github/workflows/e2e-nightly-master.yml index d6d459abc..da8b07d70 100644 --- a/.github/workflows/e2e-nightly-master.yml +++ b/.github/workflows/e2e-nightly-master.yml @@ -10,39 +10,38 @@ on: - cron: '0 2 * * *' jobs: - e2e-nightly-test-2: + e2e-nightly-test: # Run parallel jobs for the listed testnet groups (must match the # ./build/generator -g flag) strategy: fail-fast: false matrix: - p2p: ['legacy', 'new', 'hybrid'] - group: ['00', '01'] + group: ['00', '01', '02', '03'] runs-on: ubuntu-latest timeout-minutes: 60 steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - uses: actions/checkout@v2.3.4 - name: Build working-directory: test/e2e # Run make jobs in parallel, since we can't run steps in parallel. 
- run: make -j2 docker generator runner + run: make -j2 docker generator runner tests - name: Generate testnets working-directory: test/e2e # When changing -g, also change the matrix groups above - run: ./build/generator -g 2 -d networks/nightly/${{ matrix.p2p }} -p ${{ matrix.p2p }} + run: ./build/generator -g 4 -d networks/nightly/ - - name: Run ${{ matrix.p2p }} p2p testnets in group ${{ matrix.group }} + - name: Run testnets in group ${{ matrix.group }} working-directory: test/e2e - run: ./run-multiple.sh networks/nightly/${{ matrix.p2p }}/*-group${{ matrix.group }}-*.toml + run: ./run-multiple.sh networks/nightly/*-group${{ matrix.group }}-*.toml e2e-nightly-fail-2: - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{ failure() }} runs-on: ubuntu-latest steps: @@ -58,7 +57,7 @@ jobs: SLACK_FOOTER: '' e2e-nightly-success: # may turn this off once they seem to pass consistently - needs: e2e-nightly-test-2 + needs: e2e-nightly-test if: ${{ success() }} runs-on: ubuntu-latest steps: diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 7fc3cde7a..134ae979c 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -16,9 +16,9 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go go.mod go.sum - name: Build working-directory: test/e2e # Run two make jobs in parallel, since we can't run steps in parallel. - run: make -j2 docker runner + run: make -j2 docker runner tests if: "env.GIT_DIFF != ''" - name: Run CI testnet working-directory: test/e2e - run: ./build/runner -f networks/ci.toml + run: ./run-multiple.sh networks/ci.toml if: "env.GIT_DIFF != ''" - - name: Emit logs on failure - if: ${{ failure() }} - working-directory: test/e2e - run: ./build/runner -f networks/ci.toml logs diff --git a/.github/workflows/fuzz-nightly.yml b/.github/workflows/fuzz-nightly.yml index c47dc4411..38ca6896d 100644 --- a/.github/workflows/fuzz-nightly.yml +++ b/.github/workflows/fuzz-nightly.yml @@ -15,7 +15,7 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: '1.16' + go-version: '1.17' - uses: actions/checkout@v2.3.4 diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 4b0092afc..3e257e47c 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -14,7 +14,7 @@ jobs: timeout-minutes: 8 steps: - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go go.mod go.sum - uses: golangci/golangci-lint-action@v2.5.2 with: # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version. 
- version: v1.38 + version: v1.42.1 args: --timeout 10m github-token: ${{ secrets.github_token }} if: env.GIT_DIFF diff --git a/.github/workflows/proto-docker.yml b/.github/workflows/proto-docker.yml index 8dc612602..ee26bd111 100644 --- a/.github/workflows/proto-docker.yml +++ b/.github/workflows/proto-docker.yml @@ -34,7 +34,7 @@ jobs: echo ::set-output name=tags::${TAGS} - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1.5.0 + uses: docker/setup-buildx-action@v1.6.0 - name: Login to DockerHub uses: docker/login-action@v1.10.0 @@ -43,7 +43,7 @@ jobs: password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Publish to Docker Hub - uses: docker/build-push-action@v2.6.1 + uses: docker/build-push-action@v2.7.0 with: context: ./tools/proto file: ./tools/proto/Dockerfile diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 771b8ab7c..567a607ca 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -2,7 +2,7 @@ name: "Release" on: push: - branches: + branches: - "RC[0-9]/**" tags: - "v[0-9]+.[0-9]+.[0-9]+" # Push events to matching v*, i.e. v1.0, v20.15.10 @@ -18,10 +18,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: '1.16' - - - run: echo https://github.com/tendermint/tendermint/blob/${GITHUB_REF#refs/tags/}/CHANGELOG.md#${GITHUB_REF#refs/tags/} > ../release_notes.md - if: startsWith(github.ref, 'refs/tags/') + go-version: '1.17' - name: Build uses: goreleaser/goreleaser-action@v2 @@ -35,6 +32,6 @@ jobs: if: startsWith(github.ref, 'refs/tags/') with: version: latest - args: release --rm-dist --release-notes=../release_notes.md + args: release --rm-dist env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 14bb8570b..abb1e848e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -17,9 +17,9 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go @@ -49,9 +49,9 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go @@ -80,9 +80,9 @@ jobs: steps: - uses: actions/setup-go@v2 with: - go-version: "1.16" + go-version: "1.17" - uses: actions/checkout@v2.3.4 - - uses: technote-space/get-diff-action@v4 + - uses: technote-space/get-diff-action@v5 with: PATTERNS: | **/**.go diff --git a/.golangci.yml b/.golangci.yml index f05cde90c..e0f3fe163 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -1,32 +1,35 @@ linters: enable: + - asciicheck - bodyclose - deadcode - depguard - dogsled - dupl - errcheck + - exportloopref # - funlen # - gochecknoglobals # - gochecknoinits + # - gocognit - goconst - - gocritic + # - gocritic # - gocyclo # - godox - gofmt - goimports - - golint + - revive - gosec - gosimple - govet - ineffassign # - interfacer - lll - - misspell # - maligned + - misspell - nakedret + - nolintlint - prealloc - - scopelint - staticcheck - structcheck - stylecheck @@ -37,9 +40,6 @@ linters: - varcheck # - whitespace # - wsl - # - gocognit - - nolintlint - - asciicheck issues: exclude-rules: diff --git a/CHANGELOG.md b/CHANGELOG.md index cbda6a678..e1ef2201a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,234 @@ # Changelog -Friendly reminder, we have a [bug bounty 
program](https://hackerone.com/tendermint). +Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). + +## v0.35.0-rc2 + +September 27, 2021 + +### BREAKING CHANGES + +- Go API + + - [crypto/armor]: [\#6963](https://github.com/tendermint/tendermint/pull/6963) remove this unused package, which was based on + deprecated fundamentals. Downstream users should maintain this + library themselves. (@tychoish) + - [state] [store] [proxy] [rpc/core]: [\#6937](https://github.com/tendermint/tendermint/pull/6937) move packages to + `internal` to prevent consumption of these internal APIs by + external users. (@tychoish) + +### FEATURES + +- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary has built-in support for running the e2e application (with state sync support) (@cmwaters). + + +## v0.35.0-rc1 + +September 8, 2021 + +Special thanks to external contributors on this release: @JayT106, @bipulprasad, @alessio, @Yawning, @silasdavis, +@cuonglm, @tanyabouman, @JoeKash, @githubsands, @jeebster, @crypto-facs, @liamsi, and @gotjoshua + +### BREAKING CHANGES + +- CLI/RPC/Config + - [pubsub/events] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) + - [config] [\#5598](https://github.com/tendermint/tendermint/pull/5598) The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) + - [config] [\#5728](https://github.com/tendermint/tendermint/pull/5728) `fastsync.version = "v1"` is no longer supported (@melekes) + - [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) + - [cli] [\#5777](https://github.com/tendermint/tendermint/pull/5777) use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) + - [rpc] [\#6019](https://github.com/tendermint/tendermint/pull/6019) standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) + - [rpc] [\#6168](https://github.com/tendermint/tendermint/pull/6168) Change default sorting to desc for `/tx_search` results (@melekes) + - [cli] [\#6282](https://github.com/tendermint/tendermint/pull/6282) User must specify the node mode when using `tendermint init` (@cmwaters) + - [state/indexer] [\#6382](https://github.com/tendermint/tendermint/pull/6382) reconstruct indexer, move txindex into the indexer package (@JayT106) + - [cli] [\#6372](https://github.com/tendermint/tendermint/pull/6372) Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) + - [config] [\#6462](https://github.com/tendermint/tendermint/pull/6462) Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) + - [rpc] [\#6610](https://github.com/tendermint/tendermint/pull/6610) Add MaxPeerBlockHeight into /status rpc call (@JayT106) + - [blocksync/rpc] [\#6620](https://github.com/tendermint/tendermint/pull/6620) Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) + - [rpc/grpc] [\#6725](https://github.com/tendermint/tendermint/pull/6725) Mark gRPC in the RPC layer as deprecated. + - [blocksync/v2] [\#6730](https://github.com/tendermint/tendermint/pull/6730) Fast Sync v2 is deprecated; please use v0 + - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents; a fetch sketch follows below. 
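A minimal sketch of paging through `/genesis_chunked` from Go. The `chunk` query parameter, the result field names (`total`, `chunk`, `data`), and their types are assumptions for illustration rather than an API pinned down by this changelog; check the RPC docs for the release you run against:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

// chunkResult mirrors the assumed JSON shape of a /genesis_chunked response:
// a total chunk count, this chunk's index, and base64-encoded chunk data.
type chunkResult struct {
	Result struct {
		Total json.Number `json:"total"`
		Chunk json.Number `json:"chunk"`
		Data  []byte      `json:"data"` // encoding/json base64-decodes into []byte
	} `json:"result"`
}

func fetchChunk(base string, i int) (*chunkResult, error) {
	resp, err := http.Get(fmt.Sprintf("%s/genesis_chunked?chunk=%d", base, i))
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	var out chunkResult
	return &out, json.Unmarshal(body, &out)
}

func main() {
	base := "http://localhost:26657" // default Tendermint RPC listen address
	var genesis []byte
	for i := 0; ; i++ {
		c, err := fetchChunk(base, i)
		if err != nil {
			panic(err)
		}
		genesis = append(genesis, c.Result.Data...)
		total, err := c.Result.Total.Int64()
		if err != nil {
			panic(err)
		}
		if int64(i)+1 >= total {
			break
		}
	}
	fmt.Printf("reassembled genesis document: %d bytes\n", len(genesis))
}
```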
+ - [rpc/jsonrpc/server] [\#6785](https://github.com/tendermint/tendermint/pull/6785) `Listen` function updated to take an `int` argument, `maxOpenConnections`, instead of an entire config object. (@williambanfield) + - [rpc] [\#6820](https://github.com/tendermint/tendermint/pull/6820) Update RPC methods to reflect changes in the p2p layer, disabling support for `UnsafeDialSeeds` and `UnsafeDialPeers` when used with the new p2p layer, and changing the response format of the peer list in `NetInfo` for all users. + - [cli] [\#6854](https://github.com/tendermint/tendermint/pull/6854) Remove deprecated snake case commands. (@tychoish) + +- Apps + - [ABCI] [\#6408](https://github.com/tendermint/tendermint/pull/6408) Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type (see the event sketch below). (@alexanderbez) + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Remove `SetOption` method from `ABCI.Client` interface + - [ABCI] [\#5447](https://github.com/tendermint/tendermint/pull/5447) Reset `Oneof` indexes for `Request` and `Response`. + - [ABCI] [\#5818](https://github.com/tendermint/tendermint/pull/5818) Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. + - [ABCI] [\#3546](https://github.com/tendermint/tendermint/pull/3546) Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) + - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` has been renamed to `TMVersion`. + - Setting version strings via ldflags is no longer required. + - [abci/counter] [\#6684](https://github.com/tendermint/tendermint/pull/6684) Delete counter example app + +- Go API + - [pubsub] [\#6634](https://github.com/tendermint/tendermint/pull/6634) The `Query#Matches` method, along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez) + - [p2p] [\#6618](https://github.com/tendermint/tendermint/pull/6618) [\#6583](https://github.com/tendermint/tendermint/pull/6583) Move `p2p.NodeInfo`, `p2p.NodeID` and `p2p.NetAddress` into `types` to support use in external packages. (@tychoish) + - [node] [\#6540](https://github.com/tendermint/tendermint/pull/6540) Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish) + - [p2p] [\#6547](https://github.com/tendermint/tendermint/pull/6547) Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish) + - [libs/log] [\#6534](https://github.com/tendermint/tendermint/pull/6534) Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez) + - [libs/time] [\#6495](https://github.com/tendermint/tendermint/pull/6495) Move types/time to libs/time to improve consistency. (@tychoish) + - [mempool] [\#6529](https://github.com/tendermint/tendermint/pull/6529) The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez) + - [abci/client, proxy] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes) + - [p2p] Remove unused function `MakePoWTarget`. 
(@erikgrinaker) + - [libs/bits] [\#5720](https://github.com/tendermint/tendermint/pull/5720) Validate `BitArray` in `FromProto`, which now returns an error (@melekes) + - [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker) + - [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker) + - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio) + - [store] [\#5848](https://github.com/tendermint/tendermint/pull/5848) Remove block store state in favor of using the db iterators directly (@cmwaters) + - [state] [\#5864](https://github.com/tendermint/tendermint/pull/5864) Use an iterator when pruning state (@cmwaters) + - [types] [\#6023](https://github.com/tendermint/tendermint/pull/6023) Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`. + - Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic. + - [light] [\#6054](https://github.com/tendermint/tendermint/pull/6054) Move `MaxRetryAttempt` option from client to provider. + - `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters) + - [all] [\#6077](https://github.com/tendermint/tendermint/pull/6077) Change spelling from British English to American (@cmwaters) + - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub + - Rename "behaviour" pkg to "behavior" and internalize it in blocksync v2 + - [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes) + - [rpc/client/http] [\#6176](https://github.com/tendermint/tendermint/pull/6176) Unexpose `WSEvents` (@melekes) + - [rpc/jsonrpc/client/ws_client] [\#6176](https://github.com/tendermint/tendermint/pull/6176) `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes) + - [internal/libs] [\#6366](https://github.com/tendermint/tendermint/pull/6366) Move `autofile`, `clist`, `fail`, `flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder + - [libs/rand] [\#6364](https://github.com/tendermint/tendermint/pull/6364) Remove most of libs/rand in favor of the standard library's `math/rand` (@liamsi) + - [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package. + Some core types have been kept in the `mempool` package such as `TxCache` and its implementations, the `Mempool` interface itself + and `TxInfo`. (@alexanderbez) + - [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning) + - [types] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Move `NodeKey` to types to make the type public. 
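Taken together, the `EventAttribute` change (\#6408) and the pubsub change (\#6634) above mean events are now built from plain strings and matched as `[]abci.Event`. A minimal sketch; the `Index` field, the `(bool, error)` return of `Matches`, and the `libs/pubsub/query` import path are assumed from the surrounding v0.35 API rather than confirmed by this changelog:

```go
package main

import (
	"fmt"

	abci "github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/pubsub/query"
)

func main() {
	// Key and Value are plain strings in v0.35 (previously []byte).
	events := []abci.Event{{
		Type: "transfer",
		Attributes: []abci.EventAttribute{
			{Key: "sender", Value: "addr1", Index: true},
		},
	}}

	// Query#Matches now takes []abci.Event rather than map[string][]string.
	q := query.MustParse("transfer.sender = 'addr1'")
	ok, err := q.Matches(events)
	if err != nil {
		panic(err)
	}
	fmt.Println("matched:", ok) // matched: true
}
```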
+ - [config] [\#6627](https://github.com/tendermint/tendermint/pull/6627) Extend `config` to contain methods `LoadNodeKeyID` and `LoadOrGenNodeKeyID` + - [blocksync] [\#6755](https://github.com/tendermint/tendermint/pull/6755) Rename `FastSync` and `Blockchain` package to `BlockSync` (@cmwaters) + +- Data Storage + - [store/state/evidence/light] [\#5771](https://github.com/tendermint/tendermint/pull/5771) Use an order-preserving varint key encoding (@cmwaters) + - [mempool] [\#6396](https://github.com/tendermint/tendermint/pull/6396) Remove mempool's write ahead log (WAL), previously unused by the tendermint code. (@tychoish) + - [state] [\#6541](https://github.com/tendermint/tendermint/pull/6541) Move pruneBlocks from consensus/state to state/execution. (@JayT106) + +- Tooling + - [tools] [\#6498](https://github.com/tendermint/tendermint/pull/6498) Use the OS home directory instead of the hardcoded PATH. (@JayT106) + - [cli/indexer] [\#6676](https://github.com/tendermint/tendermint/pull/6676) Add command-line tooling to reindex events. (@JayT106) + +### FEATURES + +- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) (@dongsam) +- [rpc] [\#6329](https://github.com/tendermint/tendermint/pull/6329) Don't cap page size in unsafe mode (@gotjoshua, @cmwaters) +- [pex] [\#6305](https://github.com/tendermint/tendermint/pull/6305) v2 pex reactor with backwards compatibility. Introduces two new pex messages to + accommodate the new p2p stack. Removes the notion of seeds and crawling. All peer + exchange reactors behave the same. (@cmwaters) +- [crypto] [\#6376](https://github.com/tendermint/tendermint/pull/6376) Enable sr25519 as a validator key type +- [mempool] [\#6466](https://github.com/tendermint/tendermint/pull/6466) Introduction of a prioritized mempool. (@alexanderbez) + - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of + the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index. + - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the + `mempool.version` configuration, where `v1` is the default configuration. + - Applications that do not specify a priority, i.e. zero, will have transactions reaped in the order in which they are received by the node. + - Transactions are gossiped in FIFO order as they are in `v0`. +- [config/indexer] [\#6411](https://github.com/tendermint/tendermint/pull/6411) Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) +- [blocksync/event] [\#6619](https://github.com/tendermint/tendermint/pull/6619) Emit blocksync status event when switching consensus/blocksync (@JayT106) +- [statesync/event] [\#6700](https://github.com/tendermint/tendermint/pull/6700) Emit statesync status start/end event (@JayT106) +- [inspect] [\#6785](https://github.com/tendermint/tendermint/pull/6785) Add a new `inspect` command for introspecting the state and block store of a crashed tendermint node. (@williambanfield) + +### IMPROVEMENTS + +- [libs/log] Console log formatting changes as a result of [\#6534](https://github.com/tendermint/tendermint/pull/6534) and [\#6589](https://github.com/tendermint/tendermint/pull/6589). 
(@tychoish) +- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) +- [types] [\#6478](https://github.com/tendermint/tendermint/pull/6478) Add `block_id` to `newblock` event (@jeebster) +- [crypto/ed25519] [\#5632](https://github.com/tendermint/tendermint/pull/5632) Adopt zip215 `ed25519` verification. (@marbar3778) +- [crypto/ed25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning) +- [crypto/sr25519] [\#6526](https://github.com/tendermint/tendermint/pull/6526) Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning) +- [privval] [\#5603](https://github.com/tendermint/tendermint/pull/5603) Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys. +- [privval] [\#5725](https://github.com/tendermint/tendermint/pull/5725) Add gRPC support to private validator. +- [privval] [\#5876](https://github.com/tendermint/tendermint/pull/5876) `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778) +- [abci/client] [\#5673](https://github.com/tendermint/tendermint/pull/5673) `Async` requests return an error if queue is full (@melekes) +- [mempool] [\#5673](https://github.com/tendermint/tendermint/pull/5673) Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes) +- [abci] [\#5706](https://github.com/tendermint/tendermint/pull/5706) Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778) +- [blocksync/v1] [\#5728](https://github.com/tendermint/tendermint/pull/5728) Remove blocksync v1 (@melekes) +- [blocksync/v0] [\#5741](https://github.com/tendermint/tendermint/pull/5741) Relax termination conditions and increase sync timeout (@melekes) +- [cli] [\#5772](https://github.com/tendermint/tendermint/pull/5772) `gen_node_key` output now contains node ID (`id` field) (@melekes) +- [blocksync/v2] [\#5774](https://github.com/tendermint/tendermint/pull/5774) Send status request when new peer joins (@melekes) +- [store] [\#5888](https://github.com/tendermint/tendermint/pull/5888) store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands) +- [consensus] [\#5987](https://github.com/tendermint/tendermint/pull/5987) and [\#5792](https://github.com/tendermint/tendermint/pull/5792) Remove the `time_iota_ms` consensus parameter. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778, @valardragon) +- [types] [\#5994](https://github.com/tendermint/tendermint/pull/5994) Reduce the use of protobuf types in core logic. (@marbar3778) + - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk. 
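As a worked example of the `AbciVersion` entry above (\#5706), an application's `Info` handler can now see which ABCI protocol version the connecting node speaks. A minimal sketch, assuming the `abci/types` field names shown here match the release in use:

```go
package example

import (
	"log"

	abci "github.com/tendermint/tendermint/abci/types"
)

// App embeds BaseApplication so that only Info needs to be overridden.
type App struct {
	abci.BaseApplication
}

// Info reports application metadata back to Tendermint. The AbciVersion
// field (added in #5706) lets the app check protocol compatibility; a
// real application might reject an unexpected version here.
func (App) Info(req abci.RequestInfo) abci.ResponseInfo {
	log.Printf("connecting node speaks ABCI %s", req.AbciVersion)
	return abci.ResponseInfo{Data: "example-app", AppVersion: 1}
}
```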
+- [rpc/client/http] [\#6163](https://github.com/tendermint/tendermint/pull/6163) Do not drop events even if the `out` channel is full (@melekes) +- [node] [\#6059](https://github.com/tendermint/tendermint/pull/6059) Validate and complete genesis doc before saving to state store (@silasdavis) +- [state] [\#6067](https://github.com/tendermint/tendermint/pull/6067) Batch save state data (@githubsands & @cmwaters) +- [crypto] [\#6120](https://github.com/tendermint/tendermint/pull/6120) Implement batch verification interface for ed25519 and sr25519. (@marbar3778) +- [types] [\#6120](https://github.com/tendermint/tendermint/pull/6120) use batch verification for verifying commit signatures. + - If the key type supports the batch verification API, it will try to batch verify. If batch verification fails, each signature is verified individually. +- [privval/file] [\#6185](https://github.com/tendermint/tendermint/pull/6185) Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint. +- [privval] [\#6240](https://github.com/tendermint/tendermint/pull/6240) Add `context.Context` to privval interface. +- [rpc] [\#6265](https://github.com/tendermint/tendermint/pull/6265) set cache control in http-rpc response header (@JayT106) +- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. +- [node/state] [\#6370](https://github.com/tendermint/tendermint/pull/6370) graceful shutdown in the consensus reactor (@JayT106) +- [crypto/merkle] [\#6443](https://github.com/tendermint/tendermint/pull/6443) Improve HashAlternatives performance (@cuonglm) +- [crypto/merkle] [\#6513](https://github.com/tendermint/tendermint/pull/6513) Optimize HashAlternatives (@marbar3778) +- [p2p/pex] [\#6509](https://github.com/tendermint/tendermint/pull/6509) Improve addrBook.hash performance (@cuonglm) +- [consensus/metrics] [\#6549](https://github.com/tendermint/tendermint/pull/6549) Change block_size gauge to a histogram for better observability over time (@marbar3778) +- [statesync] [\#6587](https://github.com/tendermint/tendermint/pull/6587) Increase chunk priority and re-request chunks that don't arrive (@cmwaters) +- [state/privval] [\#6578](https://github.com/tendermint/tendermint/pull/6578) No GetPubKey retry beyond the proposal/voting window (@JayT106) +- [rpc] [\#6615](https://github.com/tendermint/tendermint/pull/6615) Add TotalGasUsed to block_results response (@crypto-facs) +- [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman) +- [statesync] [\#6807](https://github.com/tendermint/tendermint/pull/6807) Implement P2P state provider as an alternative to RPC (@cmwaters) + +### BUG FIXES + +- [privval] [\#5638](https://github.com/tendermint/tendermint/pull/5638) Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) +- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters) +- [rpc] [\#6507](https://github.com/tendermint/tendermint/pull/6507) Ensure RPC client can handle URLs without ports (@JayT106) +- [statesync] [\#6463](https://github.com/tendermint/tendermint/pull/6463) Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) +- [blocksync] [\#6590](https://github.com/tendermint/tendermint/pull/6590) Update the metrics during blocksync (@JayT106) + +## 
v0.34.14 + +This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash. + +### FEATURES + +- [\#6982](https://github.com/tendermint/tendermint/pull/6982) The tendermint binary now has built-in support for running the end-to-end test application (with state sync support) (@cmwaters). +- [cli] [\#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to roll back to the previous tendermint state. This may be useful in the event of a non-deterministic app hash or when reverting an upgrade. (@cmwaters) + +### IMPROVEMENTS + +- [\#7104](https://github.com/tendermint/tendermint/pull/7104) Remove IAVL dependency (backport of #6550) (@cmwaters) + +### BUG FIXES + +- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair). +- [ABCI] [\#7110](https://github.com/tendermint/tendermint/issues/7110) Revert "change client to use multi-reader mutexes (#6873)" (@tychoish). + +## v0.34.13 + +*September 6, 2021* + +This release backports improvements to state synchronization and ABCI +performance under concurrent load, and the PostgreSQL event indexer. + +### IMPROVEMENTS + +- [statesync] [\#6881](https://github.com/tendermint/tendermint/issues/6881) improvements to stateprovider logic (@cmwaters) +- [ABCI] [\#6873](https://github.com/tendermint/tendermint/issues/6873) change client to use multi-reader mutexes (@tychoish) +- [indexing] [\#6906](https://github.com/tendermint/tendermint/issues/6906) enable the PostgreSQL indexer sink (@creachadair) + +## v0.34.12 + +*August 17, 2021* + +Special thanks to external contributors on this release: @JayT106. + +### FEATURES + +- [rpc] [\#6717](https://github.com/tendermint/tendermint/pull/6717) introduce + `/genesis_chunked` rpc endpoint for handling large genesis files by chunking them (@tychoish) + +### IMPROVEMENTS + +- [rpc] [\#6825](https://github.com/tendermint/tendermint/issues/6825) Remove egregious INFO log from `ABCI#Query` RPC. (@alexanderbez) + +### BUG FIXES + +- [light] [\#6685](https://github.com/tendermint/tendermint/pull/6685) fix bug + with incorrectly handling contexts that would occasionally freeze state sync. (@cmwaters) +- [privval] [\#6748](https://github.com/tendermint/tendermint/issues/6748) Fix vote timestamp to prevent chain halt (@JayT106) ## v0.34.11 @@ -12,25 +240,25 @@ adding two new parameters to the state sync config. ### BREAKING CHANGES - Apps - - [Version] \#6494 `TMCoreSemVer` is not required to be set as a ldflag any longer. + - [Version] [\#6494](https://github.com/tendermint/tendermint/pull/6494) `TMCoreSemVer` is not required to be set as a ldflag any longer. ### IMPROVEMENTS -- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) -- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. -- [statesync] \#6582 Increase chunk priority and add multiple retry chunk requests (@cmwaters) +- [statesync] [\#6566](https://github.com/tendermint/tendermint/pull/6566) Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) +- [statesync] [\#6378](https://github.com/tendermint/tendermint/pull/6378) Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. 
(@tychoish) +- [statesync] [\#6582](https://github.com/tendermint/tendermint/pull/6582) Increase chunk priority and add multiple retry chunk requests (@cmwaters) ### BUG FIXES -- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters) +- [evidence] [\#6375](https://github.com/tendermint/tendermint/pull/6375) Fix bug with inconsistent LightClientAttackEvidence hashing (@cmwaters) ## v0.34.10 *April 14, 2021* -This release fixes a bug where peers would sometimes try to send messages +This release fixes a bug where peers would sometimes try to send messages on incorrect channels. Special thanks to our friends at Oasis Labs for surfacing -this issue! +this issue! - [p2p/node] [\#6339](https://github.com/tendermint/tendermint/issues/6339) Fix bug with using custom channels (@cmwaters) - [light] [\#6346](https://github.com/tendermint/tendermint/issues/6346) Correctly handle too high errors to improve client robustness (@cmwaters) @@ -39,7 +267,7 @@ this issue! *April 8, 2021* -This release fixes a moderate severity security issue, Security Advisory Alderfly, +This release fixes a moderate severity security issue, Security Advisory Alderfly, which impacts all networks that rely on Tendermint light clients. Further details will be released once networks have upgraded. @@ -112,7 +340,7 @@ shout-out to @marbar3778 for diagnosing it quickly. ## v0.34.6 -*February 18, 2021* +*February 18, 2021* _Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling problems._ @@ -120,9 +348,9 @@ _Tendermint Core v0.34.5 and v0.34.6 have been recalled due to release tooling p *February 11, 2021* -This release includes a fix for a memory leak in the evidence reactor (see #6068, below). -All Tendermint clients are recommended to upgrade. -Thank you to our friends at Crypto.com for the initial report of this memory leak! +This release includes a fix for a memory leak in the evidence reactor (see #6068, below). +All Tendermint clients are recommended to upgrade. +Thank you to our friends at Crypto.com for the initial report of this memory leak! Special thanks to other external contributors on this release: @yayajacky, @odidev, @laniehei, and @c29r3! @@ -132,17 +360,17 @@ Special thanks to other external contributors on this release: @yayajacky, @odid - [light] [\#6026](https://github.com/tendermint/tendermint/pull/6026) Fix a bug when height isn't provided for the rpc calls: `/commit` and `/validators` (@cmwaters) - [evidence] [\#6068](https://github.com/tendermint/tendermint/pull/6068) Terminate broadcastEvidenceRoutine when peer is stopped (@melekes) -## v0.34.3 +## v0.34.3 *January 19, 2021* -This release includes a fix for a high-severity security vulnerability, +This release includes a fix for a high-severity security vulnerability, a DoS-vector that impacted Tendermint Core v0.34.0-v0.34.2. For more details, see -[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) -or https://nvd.nist.gov/vuln/detail/CVE-2021-21271. +[Security Advisory Mulberry](https://github.com/tendermint/tendermint/security/advisories/GHSA-p658-8693-mhvg) +or https://nvd.nist.gov/vuln/detail/CVE-2021-21271. Tendermint Core v0.34.3 also updates GoGo Protobuf to 1.3.2 in order to pick up the fix for -https://nvd.nist.gov/vuln/detail/CVE-2021-3121. +https://nvd.nist.gov/vuln/detail/CVE-2021-3121. 
### BUG FIXES @@ -234,14 +462,14 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [blockchain] [\#4637](https://github.com/tendermint/tendermint/pull/4637) Migrate blockchain reactor(s) to Protobuf encoding (@marbar3778) - [evidence] [\#4949](https://github.com/tendermint/tendermint/pull/4949) Migrate evidence reactor to Protobuf encoding (@marbar3778) - [mempool] [\#4940](https://github.com/tendermint/tendermint/pull/4940) Migrate mempool from to Protobuf encoding (@marbar3778) - - [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes) + - [mempool] [\#5321](https://github.com/tendermint/tendermint/pull/5321) Batch transactions when broadcasting them to peers (@melekes) - `MaxBatchBytes` new config setting defines the max size of one batch. - [p2p/pex] [\#4973](https://github.com/tendermint/tendermint/pull/4973) Migrate `p2p/pex` reactor to Protobuf encoding (@marbar3778) - [statesync] [\#4943](https://github.com/tendermint/tendermint/pull/4943) Migrate state sync reactor to Protobuf encoding (@marbar3778) - Blockchain Protocol - - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778) + - [evidence] [\#4725](https://github.com/tendermint/tendermint/pull/4725) Remove `Pubkey` from `DuplicateVoteEvidence` (@marbar3778) - [evidence] [\#5499](https://github.com/tendermint/tendermint/pull/5449) Cap evidence to a maximum number of bytes (supercedes [\#4780](https://github.com/tendermint/tendermint/pull/4780)) (@cmwaters) - [merkle] [\#5193](https://github.com/tendermint/tendermint/pull/5193) Header hashes are no longer empty for empty inputs, notably `DataHash`, `EvidenceHash`, and `LastResultsHash` (@erikgrinaker) - [state] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Include `GasWanted` and `GasUsed` into `LastResultsHash` (@melekes) @@ -300,7 +528,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [types] [\#4852](https://github.com/tendermint/tendermint/pull/4852) Vote & Proposal `SignBytes` is now func `VoteSignBytes` & `ProposalSignBytes` (@marbar3778) - [types] [\#4798](https://github.com/tendermint/tendermint/pull/4798) Simplify `VerifyCommitTrusting` func + remove extra validation (@melekes) - [types] [\#4845](https://github.com/tendermint/tendermint/pull/4845) Remove `ABCIResult` (@melekes) - - [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778) + - [types] [\#5029](https://github.com/tendermint/tendermint/pull/5029) Rename all values from `PartsHeader` to `PartSetHeader` to have consistency (@marbar3778) - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) `Total` in `Parts` & `PartSetHeader` has been changed from a `int` to a `uint32` (@marbar3778) - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Vote: `ValidatorIndex` & `Round` are now `int32` (@marbar3778) - [types] [\#4939](https://github.com/tendermint/tendermint/pull/4939) Proposal: `POLRound` & `Round` are now `int32` (@marbar3778) @@ -338,7 +566,7 @@ Special thanks to external contributors on this release: @james-ray, @fedekunze, - [evidence] [\#4722](https://github.com/tendermint/tendermint/pull/4722) Consolidate evidence store and pool types to improve evidence DB (@cmwaters) - [evidence] 
[\#4839](https://github.com/tendermint/tendermint/pull/4839) Reject duplicate evidence from being proposed (@cmwaters) - [evidence] [\#5219](https://github.com/tendermint/tendermint/pull/5219) Change the source of evidence time to block time (@cmwaters) -- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778) +- [libs] [\#5126](https://github.com/tendermint/tendermint/pull/5126) Add a sync package which wraps sync.(RW)Mutex & deadlock.(RW)Mutex and use a build flag (deadlock) in order to enable deadlock checking (@marbar3778) - [light] [\#4935](https://github.com/tendermint/tendermint/pull/4935) Fetch and compare a new header with witnesses in parallel (@melekes) - [light] [\#4929](https://github.com/tendermint/tendermint/pull/4929) Compare header with witnesses only when doing bisection (@melekes) - [light] [\#4916](https://github.com/tendermint/tendermint/pull/4916) Validate basic for inbound validator sets and headers before further processing them (@cmwaters) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 9de5b8bcb..9d8148783 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,161 +1,42 @@ # Unreleased Changes +Friendly reminder: We have a [bug bounty program](https://hackerone.com/cosmos). + ## vX.X -Special thanks to external contributors on this release: +Month, DD, YYYY -Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermint). +Special thanks to external contributors on this release: ### BREAKING CHANGES - CLI/RPC/Config - - [pubsub/events] \#6634 The `ResultEvent.Events` field is now of type `[]abci.Event` preserving event order instead of `map[string][]string`. (@alexanderbez) - - [config] \#5598 The `test_fuzz` and `test_fuzz_config` P2P settings have been removed. (@erikgrinaker) - - [config] \#5728 `fast_sync = "v1"` is no longer supported (@melekes) - - [cli] \#5772 `gen_node_key` prints JSON-encoded `NodeKey` rather than ID and does not save it to `node_key.json` (@melekes) - - [cli] \#5777 use hyphen-case instead of snake_case for all cli commands and config parameters (@cmwaters) - - [rpc] \#6019 standardise RPC errors and return the correct status code (@bipulprasad & @cmwaters) - - [rpc] \#6168 Change default sorting to desc for `/tx_search` results (@melekes) - - [cli] \#6282 User must specify the node mode when using `tendermint init` (@cmwaters) - - [state/indexer] \#6382 reconstruct indexer, move txindex into the indexer package (@JayT106) - - [cli] \#6372 Introduce `BootstrapPeers` as part of the new p2p stack. Peers to be connected on startup (@cmwaters) - - [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish) - - [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106) - - [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106) - - [rpc/grpc] \#6725 Mark gRPC in the RPC layer as deprecated. - - [blockchain/v2] \#6730 Fast Sync v2 is deprecated, please use v0 - - [rpc] Add genesis_chunked method to support paginated and parallel fetching of large genesis documents. + + - [rpc] Remove the deprecated gRPC interface to the RPC service (@creachadair). - Apps - - [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. 
(@alexanderbez) - - [ABCI] \#5447 Remove `SetOption` method from `ABCI.Client` interface - - [ABCI] \#5447 Reset `Oneof` indexes for `Request` and `Response`. - - [ABCI] \#5818 Use protoio for msg length delimitation. Migrates from int64 to uint64 length delimiters. - - [ABCI] \#3546 Add `mempool_error` field to `ResponseCheckTx`. This field will contain an error string if Tendermint encountered an error while adding a transaction to the mempool. (@williambanfield) - - [Version] \#6494 `TMCoreSemVer` has been renamed to `TMVersion`. - - It is not required any longer to set ldflags to set version strings - - [abci/counter] \#6684 Delete counter example app - P2P Protocol + - [p2p] \#7035 Remove legacy P2P routing implementation and + associated configuration options (@tychoish) + - Go API - - [pubsub] \#6634 The `Query#Matches` method along with other pubsub methods, now accepts a `[]abci.Event` instead of `map[string][]string`. (@alexanderbez) - - [p2p] \#6618 Move `p2p.NodeInfo` into `types` to support use of the SDK. (@tychoish) - - [p2p] \#6583 Make `p2p.NodeID` and `p2p.NetAddress` exported types to support their use in the RPC layer. (@tychoish) - - [node] \#6540 Reduce surface area of the `node` package by making most of the implementation details private. (@tychoish) - - [p2p] \#6547 Move the entire `p2p` package and all reactor implementations into `internal`. (@tychoish) - - [libs/log] \#6534 Remove the existing custom Tendermint logger backed by go-kit. The logging interface, `Logger`, remains. Tendermint still provides a default logger backed by the performant zerolog logger. (@alexanderbez) - - [libs/time] \#6495 Move types/time to libs/time to improve consistency. (@tychoish) - - [mempool] \#6529 The `Context` field has been removed from the `TxInfo` type. `CheckTx` now requires a `Context` argument. (@alexanderbez) - - [abci/client, proxy] \#5673 `Async` funcs return an error, `Sync` and `Async` funcs accept `context.Context` (@melekes) - - [p2p] Remove unused function `MakePoWTarget`. (@erikgrinaker) - - [libs/bits] \#5720 Validate `BitArray` in `FromProto`, which now returns an error (@melekes) - - [proto/p2p] Rename `DefaultNodeInfo` and `DefaultNodeInfoOther` to `NodeInfo` and `NodeInfoOther` (@erikgrinaker) - - [proto/p2p] Rename `NodeInfo.default_node_id` to `node_id` (@erikgrinaker) - - [libs/os] Kill() and {Must,}{Read,Write}File() functions have been removed. (@alessio) - - [store] \#5848 Remove block store state in favor of using the db iterators directly (@cmwaters) - - [state] \#5864 Use an iterator when pruning state (@cmwaters) - - [types] \#6023 Remove `tm2pb.Header`, `tm2pb.BlockID`, `tm2pb.PartSetHeader` and `tm2pb.NewValidatorUpdate`. - - Each of the above types has a `ToProto` and `FromProto` method or function which replaced this logic. - - [light] \#6054 Move `MaxRetryAttempt` option from client to provider. 
- - `NewWithOptions` now sets the max retry attempts and timeouts (@cmwaters) - - [all] \#6077 Change spelling from British English to American (@cmwaters) - - Rename "Subscription.Cancelled()" to "Subscription.Canceled()" in libs/pubsub - - Rename "behaviour" pkg to "behavior" and internalized it in blockchain v2 - - [rpc/client/http] \#6176 Remove `endpoint` arg from `New`, `NewWithTimeout` and `NewWithClient` (@melekes) - - [rpc/client/http] \#6176 Unexpose `WSEvents` (@melekes) - - [rpc/jsonrpc/client/ws_client] \#6176 `NewWS` no longer accepts options (use `NewWSWithOptions` and `OnReconnect` funcs to configure the client) (@melekes) - - [internal/libs] \#6366 Move `autofile`, `clist`,`fail`,`flowrate`, `protoio`, `sync`, `tempfile`, `test` and `timer` lib packages to an internal folder - - [libs/rand] \#6364 Remove most of libs/rand in favour of standard lib's `math/rand` (@liamsi) - - [mempool] \#6466 The original mempool reactor has been versioned as `v0` and moved to a sub-package under the root `mempool` package. - Some core types have been kept in the `mempool` package such as `TxCache` and it's implementations, the `Mempool` interface itself - and `TxInfo`. (@alexanderbez) - - [crypto/sr25519] \#6526 Do not re-execute the Ed25519-style key derivation step when doing signing and verification. The derivation is now done once and only once. This breaks `sr25519.GenPrivKeyFromSecret` output compatibility. (@Yawning) - - [types] \#6627 Move `NodeKey` to types to make the type public. - - [config] \#6627 Extend `config` to contain methods `LoadNodeKeyID` and `LoadorGenNodeKeyID` - - [blocksync] \#6755 Rename `FastSync` and `Blockchain` package to `BlockSync` - (@cmwaters) + + - [blocksync] \#7046 Remove v2 implementation of the blocksync + service and reactor, which was disabled in the previous release + (@tychoish) + - [p2p] \#7064 Remove WDRR queue implementation. (@tychoish) - Blockchain Protocol -- Data Storage - - [store/state/evidence/light] \#5771 Use an order-preserving varint key encoding (@cmwaters) - - [mempool] \#6396 Remove mempool's write ahead log (WAL), (previously unused by the tendermint code). (@tychoish) - - [state] \#6541 Move pruneBlocks from consensus/state to state/execution. (@JayT106) - -- Tooling - - [tools] \#6498 Set OS home dir to instead of the hardcoded PATH. (@JayT106) - - [cli/indexer] \#6676 Reindex events command line tooling. (@JayT106) - ### FEATURES -- [config] Add `--mode` flag and config variable. See [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) @dongsam -- [rpc] \#6329 Don't cap page size in unsafe mode (@gotjoshua, @cmwaters) -- [pex] \#6305 v2 pex reactor with backwards compatability. Introduces two new pex messages to - accomodate for the new p2p stack. Removes the notion of seeds and crawling. All peer - exchange reactors behave the same. (@cmwaters) -- [crypto] \#6376 Enable sr25519 as a validator key -- [mempool] \#6466 Introduction of a prioritized mempool. (@alexanderbez) - - `Priority` and `Sender` have been introduced into the `ResponseCheckTx` type, where the `priority` will determine the prioritization of - the transaction when a proposer reaps transactions for a block proposal. The `sender` field acts as an index. - - Operators may toggle between the legacy mempool reactor, `v0`, and the new prioritized reactor, `v1`, by setting the - `mempool.version` configuration, where `v1` is the default configuration. - - Applications that do not specify a priority, i.e. 
zero, will have transactions reaped by the order in which they are received by the node. - - Transactions are gossiped in FIFO order as they are in `v0`. -- [config/indexer] \#6411 Introduce support for custom event indexing data sources, specifically PostgreSQL. (@JayT106) -- [fastsync/event] \#6619 Emit fastsync status event when switching consensus/fastsync (@JayT106) -- [statesync/event] \#6700 Emit statesync status start/end event (@JayT106) +- [cli] [\#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to roll back to the previous tendermint state in the event of a non-deterministic app hash or when reverting an upgrade. +- [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish) ### IMPROVEMENTS -- [libs/log] Console log formatting changes as a result of \#6534 and \#6589. (@tychoish) -- [statesync] \#6566 Allow state sync fetchers and request timeout to be configurable. (@alexanderbez) -- [types] \#6478 Add `block_id` to `newblock` event (@jeebster) -- [crypto/ed25519] \#5632 Adopt zip215 `ed25519` verification. (@marbar3778) -- [crypto/ed25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `ed25519` signing and verification. (@Yawning) -- [crypto/sr25519] \#6526 Use [curve25519-voi](https://github.com/oasisprotocol/curve25519-voi) for `sr25519` signing and verification. (@Yawning) -- [privval] \#5603 Add `--key` to `init`, `gen_validator`, `testnet` & `unsafe_reset_priv_validator` for use in generating `secp256k1` keys. -- [privval] \#5725 Add gRPC support to private validator. -- [privval] \#5876 `tendermint show-validator` will query the remote signer if gRPC is being used (@marbar3778) -- [abci/client] \#5673 `Async` requests return an error if queue is full (@melekes) -- [mempool] \#5673 Cancel `CheckTx` requests if RPC client disconnects or times out (@melekes) -- [abci] \#5706 Added `AbciVersion` to `RequestInfo` allowing applications to check ABCI version when connecting to Tendermint. (@marbar3778) -- [blockchain/v1] \#5728 Remove in favor of v2 (@melekes) -- [blockchain/v0] \#5741 Relax termination conditions and increase sync timeout (@melekes) -- [cli] \#5772 `gen_node_key` output now contains node ID (`id` field) (@melekes) -- [blockchain/v2] \#5774 Send status request when new peer joins (@melekes) -- [consensus] \#5792 Deprecates the `time_iota_ms` consensus parameter, to reduce the bug surface. The parameter is no longer used. (@valardragon) -- [store] \#5888 store.SaveBlock saves using batches instead of transactions for now to improve ACID properties. This is a quick fix for underlying issues around tm-db and ACID guarantees. (@githubsands) -- [consensus] \#5987 Remove `time_iota_ms` from consensus params. Merge `tmproto.ConsensusParams` and `abci.ConsensusParams`. (@marbar3778) -- [types] \#5994 Reduce the use of protobuf types in core logic. (@marbar3778) - - `ConsensusParams`, `BlockParams`, `ValidatorParams`, `EvidenceParams`, `VersionParams`, `sm.Version` and `version.Consensus` have become native types. They still utilize protobuf when being sent over the wire or written to disk. -- [rpc/client/http] \#6163 Do not drop events even if the `out` channel is full (@melekes) -- [node] \#6059 Validate and complete genesis doc before saving to state store (@silasdavis) -- [state] \#6067 Batch save state data (@githubsands & @cmwaters) -- [crypto] \#6120 Implement batch verification interface for ed25519 and sr25519. 
(@marbar3778) -- [types] \#6120 use batch verification for verifying commits signatures. - - If the key type supports the batch verification API it will try to batch verify. If the verification fails we will single verify each signature. -- [privval/file] \#6185 Return error on `LoadFilePV`, `LoadFilePVEmptyState`. Allows for better programmatic control of Tendermint. -- [privval] \#6240 Add `context.Context` to privval interface. -- [rpc] \#6265 set cache control in http-rpc response header (@JayT106) -- [statesync] \#6378 Retry requests for snapshots and add a minimum discovery time (5s) for new snapshots. -- [node/state] \#6370 graceful shutdown in the consensus reactor (@JayT106) -- [crypto/merkle] \#6443 Improve HashAlternatives performance (@cuonglm) -- [crypto/merkle] \#6513 Optimize HashAlternatives (@marbar3778) -- [p2p/pex] \#6509 Improve addrBook.hash performance (@cuonglm) -- [consensus/metrics] \#6549 Change block_size gauge to a histogram for better observability over time (@marbar3778) -- [statesync] \#6587 Increase chunk priority and re-request chunks that don't arrive (@cmwaters) -- [state/privval] \#6578 No GetPubKey retry beyond the proposal/voting window (@JayT106) -- [rpc] \#6615 Add TotalGasUsed to block_results response (@crypto-facs) -- [cmd/tendermint/commands] \#6623 replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman) ### BUG FIXES -- [privval] \#5638 Increase read/write timeout to 5s and calculate ping interval based on it (@JoeKash) -- [blockchain/v1] [\#5701](https://github.com/tendermint/tendermint/pull/5701) Handle peers without blocks (@melekes) -- [blockchain/v1] \#5711 Fix deadlock (@melekes) -- [evidence] \#6375 Fix bug with inconsistent LightClientAttackEvidence hashing (cmwaters) -- [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106) -- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters) -- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106) -- [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman) -- [light] \#6687 Fix bug with incorrecly handled contexts in the light client (@cmwaters) +- fix: assignment copies lock value in `BitArray.UnmarshalJSON()` (@lklimek) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a8bc8aa8f..16bef07cc 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -227,116 +227,6 @@ Fixes #nnnn Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though! -### Release Procedure - -#### Major Release - -This major release process assumes that this release was preceded by release candidates. -If there were no release candidates, and you'd like to cut a major release directly from master, see below. - -1. Start on the latest RC branch (`RCx/vX.X.0`). -2. Run integration tests. -3. Branch off of the RC branch (`git checkout -b release-prep`) and prepare the release: - - "Squash" changes from the changelog entries for the RCs into a single entry, - and add all changes included in `CHANGELOG_PENDING.md`. - (Squashing includes both combining all entries, as well as removing or simplifying - any intra-RC changes. It may also help to alphabetize the entries by package name.) - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all PRs - - Ensure that UPGRADING.md is up-to-date and includes notes on any breaking changes - or other upgrading flows. 
- - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary - - Add any release notes you would like to be added to the body of the release to `release_notes.md`. -4. Open a PR with these changes against the RC branch (`RCx/vX.X.0`). -5. Once these changes are on the RC branch, branch off of the RC branch again to create a release branch: - - `git checkout RCx/vX.X.0` - - `git checkout -b release/vX.X.0` -6. Push a tag with prepared release details. This will trigger the actual release `vX.X.0`. - - `git tag -a vX.X.0 -m 'Release vX.X.0'` - - `git push origin vX.X.0` -7. Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. -8. Create the long-lived minor release branch `RC0/vX.X.1` for the next point release on this - new major release series. - -##### Major Release (from `master`) - -1. Start on `master` -2. Run integration tests (see `test_integrations` in Makefile) -3. Prepare release in a pull request against `master` (to be squash merged): - - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md`; if this release - had release candidates, squash all the RC updates into one - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for - all issues - - Run `bash ./scripts/authors.sh` to get a list of authors since the latest - release, and add the github aliases of external contributors to the top of - the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - - Reset the `CHANGELOG_PENDING.md` - - Bump TMVersionDefault version in `version.go` - - Bump P2P and block protocol versions in `version.go`, if necessary - - Bump ABCI protocol version in `version.go`, if necessary - - Make sure all significant breaking changes are covered in `UPGRADING.md` - - Add any release notes you would like to be added to the body of the release to `release_notes.md`. -4. Push a tag with prepared release details (this will trigger the release `vX.X.0`) - - `git tag -a vX.X.x -m 'Release vX.X.x'` - - `git push origin vX.X.x` -5. Update the `CHANGELOG.md` file on master with the releases changelog. -6. Delete any RC branches and tags for this release (if applicable) - -#### Minor Release (Point Releases) - -Minor releases are done differently from major releases: They are built off of long-lived backport branches, rather than from master. -Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, and -the backport branches have names like `v0.34.x` or `v0.33.x` (literally, `x`; it is not a placeholder in this case). - -As non-breaking changes land on `master`, they should also be backported (cherry-picked) to these backport branches. - -We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport to the needed branch. Depending on which backport branch you need to backport to there will be labels for them. To notify the bot to backport a pull request, mark the pull request with the label `backport-to-`. Once the original pull request is merged, the bot will try to cherry-pick the pull request to the backport branch. If the bot fails to backport, it will open a pull request. The author of the original pull request is responsible for solving the conflicts and merging the pull request. - -Minor releases don't have release candidates by default, although any tricky changes may merit a release candidate. 
- -To create a minor release: - -1. Checkout the long-lived backport branch: `git checkout vX.X.x` -2. Run integration tests: `make test_integrations` -3. Check out a new branch and prepare the release: - - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` - - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues - - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - - Reset the `CHANGELOG_PENDING.md` - - Bump the ABCI version number, if necessary. - (Note that ABCI follows semver, and that ABCI versions are the only versions - which can change during minor releases, and only field additions are valid minor changes.) - - Add any release notes you would like to be added to the body of the release to `release_notes.md`. -4. Open a PR with these changes that will land them back on `vX.X.x` -5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. - - `git tag -a vX.X.x -m 'Release vX.X.x'` - - `git push origin vX.X.x` -6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. - - Remove all `R:minor` labels from the pull requests that were included in the release. - - Do not merge the backport branch into master. - -#### Release Candidates - -Before creating an official release, especially a major release, we may want to create a -release candidate (RC) for our friends and partners to test out. We use git tags to -create RCs, and we build them off of RC branches. RC branches typically have names formatted -like `RCX/vX.X.X` (or, concretely, `RC0/v0.34.0`), while the tags themselves follow -the "standard" release naming conventions, with `-rcX` at the end (`vX.X.X-rcX`). - -(Note that branches and tags _cannot_ have the same names, so it's important that these branches -have distinct names from the tags/release names.) - -1. Start from the RC branch (e.g. `RC0/v0.34.0`). -2. Create the new tag, specifying a name and a tag "message": - `git tag -a v0.34.0-rc0 -m "Release Candidate v0.34.0-rc0` -3. Push the tag back up to origin: - `git push origin v0.34.0-rc4` - Now the tag should be available on the repo's releases page. -4. 
Create a new release candidate branch for any possible updates to the RC: - `git checkout -b RC1/v0.34.0; git push origin RC1/v0.34.0` - ## Testing ### Unit tests diff --git a/Makefile b/Makefile index a509f3a26..2bac7f5bf 100644 --- a/Makefile +++ b/Makefile @@ -89,7 +89,7 @@ proto-gen: .PHONY: proto-gen proto-lint: - @$(DOCKER_BUF) check lint --error-format=json + @$(DOCKER_BUF) lint --error-format=json .PHONY: proto-lint proto-format: @@ -98,11 +98,11 @@ proto-format: .PHONY: proto-format proto-check-breaking: - @$(DOCKER_BUF) check breaking --against-input .git#branch=master + @$(DOCKER_BUF) breaking --against .git#branch=master .PHONY: proto-check-breaking proto-check-breaking-ci: - @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master + @$(DOCKER_BUF) breaking --against $(HTTPS_GIT)#branch=master .PHONY: proto-check-breaking-ci ############################################################################### @@ -131,11 +131,11 @@ generate_test_cert: # generate server cerificate @certstrap request-cert -cn server -ip 127.0.0.1 # self-sign server cerificate with rootCA - @certstrap sign server --CA "root CA" + @certstrap sign server --CA "root CA" # generate client cerificate @certstrap request-cert -cn client -ip 127.0.0.1 # self-sign client cerificate with rootCA - @certstrap sign client --CA "root CA" + @certstrap sign client --CA "root CA" .PHONY: generate_test_cert ############################################################################### @@ -214,7 +214,7 @@ DESTINATION = ./index.html.md build-docs: @cd docs && \ while read -r branch path_prefix; do \ - (git checkout $${branch} && npm install && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ + (git checkout $${branch} && npm ci && VUEPRESS_BASE="/$${path_prefix}/" npm run build) ; \ mkdir -p ~/output/$${path_prefix} ; \ cp -r .vuepress/dist/* ~/output/$${path_prefix}/ ; \ cp ~/output/$${path_prefix}/index.html ~/output ; \ @@ -227,13 +227,13 @@ build-docs: build-docker: build-linux cp $(BUILDDIR)/tendermint DOCKER/tendermint - docker build --label=tendermint --tag="tendermint/tendermint" DOCKER + docker build --label=tendermint --tag="tendermint/tendermint" -f DOCKER/Dockerfile . 
rm -rf DOCKER/tendermint .PHONY: build-docker ############################################################################### -### Mocks ### +### Mocks ### ############################################################################### mockery: diff --git a/README.md b/README.md index d1e1df6dd..f0da8f484 100644 --- a/README.md +++ b/README.md @@ -9,7 +9,7 @@ Or [Blockchain](), for shor [![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) [![API Reference](https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667)](https://pkg.go.dev/github.com/tendermint/tendermint) [![Go version](https://img.shields.io/badge/go-1.16-blue.svg)](https://github.com/moovweb/gvm) -[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/vcExX9T) +[![Discord chat](https://img.shields.io/discord/669268347736686612.svg)](https://discord.gg/cosmosnetwork) [![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) [![tendermint/tendermint](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) [![Sourcegraph](https://sourcegraph.com/github.com/tendermint/tendermint/-/badge.svg)](https://sourcegraph.com/github.com/tendermint/tendermint?badge) @@ -33,7 +33,9 @@ Tendermint has been in the production of private and public environments, most n See below for more details about [versioning](#versioning). In any case, if you intend to run Tendermint in production, we're happy to help. You can -contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/vcExX9T). +contact us [over email](mailto:hello@interchain.berlin) or [join the chat](https://discord.gg/cosmosnetwork). + +More on how releases are conducted can be found [here](./RELEASES.md). ## Security @@ -82,32 +84,12 @@ and familiarize yourself with our Tendermint uses [Semantic Versioning](http://semver.org/) to determine when and how the version changes. According to SemVer, anything in the public API can change at any time before version 1.0.0 -To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used -to signal breaking changes across a subset of the total public API. This subset includes all -interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not -include the Go APIs. +To provide some stability to users of 0.X.X versions of Tendermint, the MINOR version is used +to signal breaking changes across Tendermint's API. This API includes all +publicly exposed types, functions, and methods in non-internal Go packages as well as +the types and methods accessible via the Tendermint RPC interface. -That said, breaking changes in the following packages will be documented in the -CHANGELOG even if they don't lead to MINOR version bumps: - -- crypto -- config -- libs - - bits - - bytes - - json - - log - - math - - net - - os - - protoio - - rand - - sync - - strings - - service -- node -- rpc/client -- types +Breaking changes to these public APIs will be documented in the CHANGELOG. ### Upgrades @@ -132,6 +114,8 @@ in [UPGRADING.md](./UPGRADING.md). 
### Tendermint Core +We keep a public up-to-date version of our roadmap [here](./docs/roadmap/roadmap.md) + For details about the blockchain data structures and the p2p protocols, see the [Tendermint specification](https://docs.tendermint.com/master/spec/). diff --git a/RELEASES.md b/RELEASES.md new file mode 100644 index 000000000..8d9bc2b8e --- /dev/null +++ b/RELEASES.md @@ -0,0 +1,161 @@ +# Releases + +Tendermint uses [semantic versioning](https://semver.org/) with each release following +a `vX.Y.Z` format. The `master` branch is used for active development and thus it's +advisable not to build against it. + +The latest changes are always initially merged into `master`. +Releases are specified using tags and are built from long-lived "backport" branches +that are cut from `master` when the release process begins. +Each release "line" (e.g. 0.34 or 0.33) has its own long-lived backport branch, +and the backport branches have names like `v0.34.x` or `v0.33.x` +(literally, `x`; it is not a placeholder in this case). Tendermint only +maintains the last two releases at a time (the oldest release is predominantly +just security patches). + +## Backporting + +As non-breaking changes land on `master`, they should also be backported +to these backport branches. + +We use Mergify's [backport feature](https://mergify.io/features/backports) to automatically backport +to the needed branch. There should be a label for any backport branch that you'll be targeting. +To notify the bot to backport a pull request, mark the pull request with the label corresponding +to the correct backport branch. For example, to backport to v0.35.x, add the label `S:backport-to-v0.35.x`. +Once the original pull request is merged, the bot will try to cherry-pick the pull request +to the backport branch. If the bot fails to backport, it will open a pull request. +The author of the original pull request is responsible for solving the conflicts and +merging the pull request. + +### Creating a backport branch + +If this is the first release candidate for a major release, you get to have the honor of creating +the backport branch! + +Note that, after creating the backport branch, you'll also need to update the +tags on `master` so that `go mod` is able to order the branches correctly. You +should tag `master` with a "dev" tag that is "greater than" the backport +branches tags. See [#6072](https://github.com/tendermint/tendermint/pull/6072) +for more context. + +In the following example, we'll assume that we're making a backport branch for +the 0.35.x line. + +1. Start on `master` +2. Create and push the backport branch: + `git checkout -b v0.35.x; git push origin v0.35.x` +3. Go back to master and tag it as the dev branch for the _next_ major release and push it back up: + `git tag -a v0.36.0-dev -m "Development base for Tendermint v0.36."; git push origin v0.36.0-dev` +4. Create a new workflow (still on master) to run e2e nightlies for the new backport branch. + (See https://github.com/tendermint/tendermint/blob/master/.github/workflows/e2e-nightly-master.yml + for an example.) +5. Add a new section to the Mergify config (`.github/mergify.yml`) to enable the + backport bot to work on this branch, and add a corresponding `S:backport-to-v0.35.x` + [label](https://github.com/tendermint/tendermint/labels) so the bot can be triggered. + +## Release candidates + +Before creating an official release, especially a major release, we may want to create a +release candidate (RC) for our friends and partners to test out. 
We use git tags to +create RCs, and we build them off of backport branches. + +Tags for RCs should follow the "standard" release naming conventions, with `-rcX` at the end +(for example, `v0.35.0-rc0`). + +(Note that branches and tags _cannot_ have the same names, so it's important that these branches +have distinct names from the tags/release names.) + +If this is the first RC for a major release, you'll have to make a new backport branch (see above). +Otherwise: + +1. Start from the backport branch (e.g. `v0.35.x`). +2. Run the integration tests and the e2e nightlies + (which can be triggered from the GitHub UI; + e.g., https://github.com/tendermint/tendermint/actions/workflows/e2e-nightly-34x.yml). +3. Prepare the changelog: + - Move the changes included in `CHANGELOG_PENDING.md` into `CHANGELOG.md`. Each RC should have + its own changelog section. These will be squashed when the final candidate is released. + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for + all PRs + - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes + or other upgrading flows. + - Bump TMVersionDefault version in `version.go` + - Bump P2P and block protocol versions in `version.go`, if necessary. + Check the changelog for breaking changes in these components. + - Bump ABCI protocol version in `version.go`, if necessary +4. Open a PR with these changes against the backport branch. +5. Once these changes have landed on the backport branch, be sure to pull them back down locally. +6. Once you have the changes locally, create the new tag, specifying a name and a tag "message": + `git tag -a v0.35.0-rc0 -m "Release Candidate v0.35.0-rc0"` +7. Push the tag back up to origin: + `git push origin v0.35.0-rc0` + Now the tag should be available on the repo's releases page. +8. Future RCs will continue to be built off of this branch. + +Note that this process should only be used for "true" RCs-- +release candidates that, if successful, will be the next release. +For more experimental "RCs," create a new, short-lived branch and tag that instead. + +## Major release + +This major release process assumes that this release was preceded by release candidates. +If there were no release candidates, begin by creating a backport branch, as described above. + +1. Start on the backport branch (e.g. `v0.35.x`) +2. Run integration tests (`make test_integrations`) and the e2e nightlies. +3. Prepare the release: + - "Squash" changes from the changelog entries for the RCs into a single entry, + and add all changes included in `CHANGELOG_PENDING.md`. + (Squashing includes both combining all entries, as well as removing or simplifying + any intra-RC changes. It may also help to alphabetize the entries by package name.) + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for + all PRs + - Ensure that `UPGRADING.md` is up-to-date and includes notes on any breaking changes + or other upgrading flows. + - Bump TMVersionDefault version in `version.go` + - Bump P2P and block protocol versions in `version.go`, if necessary + - Bump ABCI protocol version in `version.go`, if necessary +4. Open a PR with these changes against the backport branch. +5. Once these changes are on the backport branch, push a tag with prepared release details. + This will trigger the actual release `v0.35.0`. + - `git tag -a v0.35.0 -m 'Release v0.35.0'` + - `git push origin v0.35.0` +6.
Make sure that `master` is updated with the latest `CHANGELOG.md`, `CHANGELOG_PENDING.md`, and `UPGRADING.md`. +7. Add the release to the documentation site generator config (see + [DOCS_README.md](./docs/DOCS_README.md) for more details). In summary: + - Start on branch `master`. + - Add a new line at the bottom of [`docs/versions`](./docs/versions) to + ensure the newest release is the default for the landing page. + - Add a new entry to `themeConfig.versions` in + [`docs/.vuepress/config.js`](./docs/.vuepress/config.js) to include the + release in the dropdown versions menu. + +## Minor release (point releases) + +Minor releases are done differently from major releases: They are built off of +long-lived backport branches, rather than from master. As non-breaking changes +land on `master`, they should also be backported into these backport branches. + +Minor releases don't have release candidates by default, although any tricky +changes may merit a release candidate. + +To create a minor release: + +1. Check out the long-lived backport branch: `git checkout v0.35.x` +2. Run integration tests (`make test_integrations`) and the nightlies. +3. Check out a new branch and prepare the release: + - Copy `CHANGELOG_PENDING.md` to top of `CHANGELOG.md` + - Run `python ./scripts/linkify_changelog.py CHANGELOG.md` to add links for all issues + - Run `bash ./scripts/authors.sh` to get a list of authors since the latest release, and add the GitHub aliases of external contributors to the top of the CHANGELOG. To look up an alias from an email, try `bash ./scripts/authors.sh ` + - Reset the `CHANGELOG_PENDING.md` + - Bump the TMVersionDefault version in `version.go` + - Bump the ABCI version number, if necessary. + (Note that ABCI follows semver, and that ABCI versions are the only versions + which can change during minor releases, and only field additions are valid minor changes.) +4. Open a PR with these changes that will land them back on `v0.35.x` +5. Once this change has landed on the backport branch, make sure to pull it locally, then push a tag. + - `git tag -a v0.35.1 -m 'Release v0.35.1'` + - `git push origin v0.35.1` +6. Create a pull request back to master with the CHANGELOG & version changes from the latest release. + - Remove all `R:minor` labels from the pull requests that were included in the release. + - Do not merge the backport branch into master. diff --git a/UPGRADING.md b/UPGRADING.md index cc721af8e..99efdf225 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -2,7 +2,7 @@ This guide provides instructions for upgrading to specific versions of Tendermint Core. -## Unreleased +## v0.35 ### ABCI Changes @@ -17,17 +17,26 @@ This guide provides instructions for upgrading to specific versions of Tendermint ### Config Changes -* `fast_sync = "v1"` and `fast_sync = "v2"` are no longer supported. Please use `v0` instead. +* The configuration file field `[fastsync]` has been renamed to `[blocksync]`. + +* The top level configuration file field `fast-sync` has moved under the new `[blocksync]` + field as `blocksync.enable`. + +* `blocksync.version = "v1"` and `blocksync.version = "v2"` (previously `fastsync`) + are no longer supported. Please use `v0` instead. During the v0.35 release cycle, `v0` was + determined to suit the existing needs, and the cost of maintaining the `v1` and `v2` modules + was judged to be greater than necessary. + * All config parameters are now hyphen-case (also known as kebab-case) instead of snake_case.
Before restarting the node make sure you have updated all the variables in your `config.toml` file. * Added `--mode` flag and `mode` config variable on `config.toml` for setting Mode of the Node: `full` | `validator` | `seed` (default: `full`) [ADR-52](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md) - + * `BootstrapPeers` has been added as part of the new p2p stack. This will eventually replace `Seeds`. Bootstrap peers are connected with on startup if needed for peer discovery. Unlike - persistent peers, there's no gaurantee that the node will remain connected with these peers. + persistent peers, there's no guarantee that the node will remain connected with these peers. * configuration values starting with `priv-validator-` have moved to the new `priv-validator` section, without the `priv-validator-` prefix. @@ -35,10 +44,33 @@ This guide provides instructions for upgrading to specific versions of Tendermint * The fast sync process as well as the blockchain package and service has all been renamed to block sync +### Database Key Format Changes + +The format of all tendermint on-disk database keys changes in +0.35. Upgrading nodes must either re-sync all data or run a migration +script provided in this release. The script located in +`github.com/tendermint/tendermint/scripts/keymigrate/migrate.go` +provides the function `Migrate(context.Context, db.DB)` which you can +operationalize as makes sense for your deployment. + +For ease of use, the `tendermint` command includes a CLI version of the +migration script, which you can invoke, as in: + + tendermint key-migrate + +This reads the configuration file as normal and allows the +`--db-backend` and `--db-dir` flags to change database operations as +needed. + +The migration operation is idempotent and can be run more than once, +if needed. (A minimal programmatic sketch appears below, under +"Running the Key Migration from Go".) + ### CLI Changes * You must now specify the node mode (validator|full|seed) in `tendermint init [mode]` +* The `--fast-sync` command line option has been renamed to `--blocksync.enable` + * If you had previously used `tendermint gen_node_key` to generate a new node key, keep in mind that it no longer saves the output to a file. You can use `tendermint init validator` or pipe the output of `tendermint gen_node_key` to @@ -53,8 +85,8 @@ This guide provides instructions for upgrading to specific versions of Tendermint ### API Changes -The p2p layer was reimplemented as part of the 0.35 release cycle, and -all reactors were refactored. As part of that work these +The p2p layer was reimplemented as part of the 0.35 release cycle and +all reactors were refactored to accommodate the change. As part of that work these implementations moved into the `internal` package and are no longer considered part of the public Go API of tendermint. These packages are: @@ -66,7 +98,7 @@ are: - `blockchain` - `evidence` -Accordingly, the space `node` package was changed to reduce access to +Accordingly, the `node` package was changed to reduce access to tendermint internals: applications that use tendermint as a library will need to change to accommodate these changes. Most notably: @@ -77,10 +109,84 @@ will need to change to accommodate these changes. Most notably: longer exported and have been replaced with `node.New` and `node.NewDefault` which provide more functional interfaces. -### RPC changes +### gRPC Support Mark gRPC in the RPC layer as deprecated and to be removed in 0.36.
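+### Running the Key Migration from Go
+
+The database key migration described above can also be driven
+programmatically via `Migrate`. The following is a minimal sketch: the
+backend, database name, and data path are illustrative assumptions, so
+substitute the values from your own `config.toml` and repeat for each
+of the node's databases.
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	dbm "github.com/tendermint/tm-db"
+
+	"github.com/tendermint/tendermint/scripts/keymigrate"
+)
+
+func main() {
+	// Open one of the node's databases (name and path are assumptions;
+	// use the backend and directory from your config.toml).
+	db, err := dbm.NewGoLevelDB("blockstore", "/path/to/.tendermint/data")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer func() { _ = db.Close() }()
+
+	// Migrate is idempotent, so re-running after a partial failure is safe.
+	if err := keymigrate.Migrate(context.Background(), db); err != nil {
+		log.Fatal(err)
+	}
+}
+```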
+### Peer Management Interface + +When running with the new P2P layer, the `UnsafeDialSeeds` and +`UnsafeDialPeers` RPC methods will always return an error. They are +deprecated and will be removed in 0.36 when the legacy peer stack is +removed. + +Additionally, the format of the Peer list returned in the `NetInfo` +method changes in this release to accommodate the different way that +the new stack tracks data about peers. This change affects users of +both stacks. + +### Using the updated p2p library + +The P2P library was reimplemented in this release. The new implementation is +enabled by default in this version of Tendermint. The legacy implementation is still +included in this version of Tendermint as a backstop to work around unforeseen +production issues. The new and legacy versions are interoperable. If necessary, +you can enable the legacy implementation in the server configuration file. + +To make use of the legacy P2P implementation, add or update the following field of +your server's configuration file under the `[p2p]` section: + +```toml +[p2p] +... +use-legacy = true +... +``` + +If you need to do this, please consider filing an issue in the Tendermint repository +to let us know why. We plan to remove the legacy P2P code in the next (v0.36) release. + +#### New p2p queue types + +The new p2p implementation enables selection of the queue type to be used for +passing messages between peers. + +The following values may be used when selecting which queue type to use: + +* `fifo`: (**default**) An unbuffered and lossless queue that passes messages through +in the order in which they were received. + +* `priority`: A priority queue of messages. + +* `wdrr`: A queue implementing the Weighted Deficit Round Robin algorithm. A +weighted deficit round robin queue is created per peer. Each queue contains a +separate 'flow' for each of the channels of communication that exist between any two +peers. Tendermint maintains a channel per message type between peers. Each WDRR +queue maintains a shared buffer with a fixed capacity through which messages on different +flows are passed. +For more information on WDRR scheduling, see: https://en.wikipedia.org/wiki/Deficit_round_robin + +To select a queue type, add or update the following field under the `[p2p]` +section of your server's configuration file. + +```toml +[p2p] +... +queue-type = "wdrr" +... +``` + + +### Support for Custom Reactor and Mempool Implementations + +The changes to the p2p layer removed existing support for custom +reactors. Based on our understanding of how this functionality was +used, the introduction of the prioritized mempool covers nearly all of +the use cases for custom reactors. If you are currently running custom +reactors and mempools and are having trouble seeing the migration path +for your project, please feel free to reach out to the Tendermint Core +development team directly. + ## v0.34.0 **Upgrading to Tendermint 0.34 requires a blockchain restart.** @@ -234,8 +340,8 @@ Other user-relevant changes include: * The old `lite` package was removed; the new light client uses the `light` package. * The `Verifier` was broken up into two pieces: - * Core verification logic (pure `VerifyX` functions) - * `Client` object, which represents the complete light client + * Core verification logic (pure `VerifyX` functions) + * `Client` object, which represents the complete light client * The new light clients stores headers & validator sets as `LightBlock`s * The RPC client can be found in the `/rpc` directory.
* The HTTP(S) proxy is located in the `/proxy` directory. @@ -367,12 +473,12 @@ Evidence Params has been changed to include duration. ### Go API * `libs/common` has been removed in favor of specific pkgs. - * `async` - * `service` - * `rand` - * `net` - * `strings` - * `cmap` + * `async` + * `service` + * `rand` + * `net` + * `strings` + * `cmap` * removal of `errors` pkg ### RPC Changes @@ -441,9 +547,9 @@ Prior to the update, suppose your `ResponseDeliverTx` look like: ```go abci.ResponseDeliverTx{ Tags: []kv.Pair{ - {Key: []byte("sender"), Value: []byte("foo")}, - {Key: []byte("recipient"), Value: []byte("bar")}, - {Key: []byte("amount"), Value: []byte("35")}, + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("35")}, } } ``` @@ -462,14 +568,14 @@ the following `Events`: ```go abci.ResponseDeliverTx{ Events: []abci.Event{ - { - Type: "transfer", - Attributes: kv.Pairs{ - {Key: []byte("sender"), Value: []byte("foo")}, - {Key: []byte("recipient"), Value: []byte("bar")}, - {Key: []byte("amount"), Value: []byte("35")}, - }, - } + { + Type: "transfer", + Attributes: kv.Pairs{ + {Key: []byte("sender"), Value: []byte("foo")}, + {Key: []byte("recipient"), Value: []byte("bar")}, + {Key: []byte("amount"), Value: []byte("35")}, + }, + } } ``` @@ -517,9 +623,9 @@ In this case, the WS client will receive an error with description: "jsonrpc": "2.0", "id": "{ID}#event", "error": { - "code": -32000, - "msg": "Server error", - "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)" + "code": -32000, + "msg": "Server error", + "data": "subscription was canceled (reason: client is not pulling messages fast enough)" // or "subscription was canceled (reason: Tendermint exited)" } } @@ -725,9 +831,9 @@ just the `Data` field set: ```go []ProofOp{ - ProofOp{ - Data: , - } + ProofOp{ + Data: , + } } ``` diff --git a/abci/client/client.go b/abci/client/client.go index b6d34e422..a38c7f81b 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "context" @@ -87,7 +87,7 @@ type ReqRes struct { *sync.WaitGroup *types.Response // Not set atomically, so be sure to use WaitGroup. - mtx tmsync.RWMutex + mtx tmsync.Mutex done bool // Gets set to true once *after* WaitGroup.Done(). cb func(*types.Response) // A single callback that may be set. } @@ -137,16 +137,16 @@ func (r *ReqRes) InvokeCallback() { // // ref: https://github.com/tendermint/tendermint/issues/5439 func (r *ReqRes) GetCallback() func(*types.Response) { - r.mtx.RLock() - defer r.mtx.RUnlock() + r.mtx.Lock() + defer r.mtx.Unlock() return r.cb } // SetDone marks the ReqRes object as done. func (r *ReqRes) SetDone() { r.mtx.Lock() - defer r.mtx.Unlock() r.done = true + r.mtx.Unlock() } func waitGroup1() (wg *sync.WaitGroup) { diff --git a/abci/client/creators.go b/abci/client/creators.go new file mode 100644 index 000000000..e17b15eca --- /dev/null +++ b/abci/client/creators.go @@ -0,0 +1,35 @@ +package abciclient + +import ( + "fmt" + + "github.com/tendermint/tendermint/abci/types" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" +) + +// Creator creates new ABCI clients. +type Creator func() (Client, error) + +// NewLocalCreator returns a Creator for the given app, +// which will be running locally. 
+func NewLocalCreator(app types.Application) Creator { + mtx := new(tmsync.Mutex) + + return func() (Client, error) { + return NewLocalClient(mtx, app), nil + } +} + +// NewRemoteCreator returns a Creator for the given address (e.g. +// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you +// want the client to connect before reporting success. +func NewRemoteCreator(addr, transport string, mustConnect bool) Creator { + return func() (Client, error) { + remoteApp, err := NewClient(addr, transport, mustConnect) + if err != nil { + return nil, fmt.Errorf("failed to connect to proxy: %w", err) + } + + return remoteApp, nil + } +} diff --git a/abci/client/doc.go b/abci/client/doc.go index eac40fe11..fd5a17075 100644 --- a/abci/client/doc.go +++ b/abci/client/doc.go @@ -1,4 +1,4 @@ -// Package abcicli provides an ABCI implementation in Go. +// Package abciclient provides an ABCI implementation in Go. // // There are 3 clients available: // 1. socket (unix or TCP) @@ -26,4 +26,4 @@ // // sync: waits for all Async calls to complete (essentially what Flush does in // the socket client) and calls Sync method. -package abcicli +package abciclient diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 31bd6fae1..f1123fab5 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "context" @@ -24,7 +24,7 @@ type grpcClient struct { conn *grpc.ClientConn chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool - mtx tmsync.RWMutex + mtx tmsync.Mutex addr string err error resCb func(*types.Request, *types.Response) // listens to all callbacks @@ -149,8 +149,8 @@ func (cli *grpcClient) StopForError(err error) { } func (cli *grpcClient) Error() error { - cli.mtx.RLock() - defer cli.mtx.RUnlock() + cli.mtx.Lock() + defer cli.mtx.Unlock() return cli.err } @@ -158,8 +158,8 @@ func (cli *grpcClient) Error() error { // NOTE: callback may get internally generated flush responses. func (cli *grpcClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() - defer cli.mtx.Unlock() cli.resCb = resCb + cli.mtx.Unlock() } //---------------------------------------- diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 69457b5b0..701108a3c 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "context" @@ -15,7 +15,7 @@ import ( type localClient struct { service.BaseService - mtx *tmsync.RWMutex + mtx *tmsync.Mutex types.Application Callback } @@ -26,24 +26,22 @@ var _ Client = (*localClient)(nil) // methods of the given app. // // Both Async and Sync methods ignore the given context.Context parameter. -func NewLocalClient(mtx *tmsync.RWMutex, app types.Application) Client { +func NewLocalClient(mtx *tmsync.Mutex, app types.Application) Client { if mtx == nil { - mtx = &tmsync.RWMutex{} + mtx = new(tmsync.Mutex) } - cli := &localClient{ mtx: mtx, Application: app, } - cli.BaseService = *service.NewBaseService(nil, "localClient", cli) return cli } func (app *localClient) SetResponseCallback(cb Callback) { app.mtx.Lock() - defer app.mtx.Unlock() app.Callback = cb + app.mtx.Unlock() } // TODO: change types.Application to include Error()? 
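// A minimal usage sketch for the Creator API defined in creators.go above
// (not part of this change; the kvstore application and the error handling
// are illustrative assumptions):
package main

import (
	"log"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
)

func main() {
	// In-process application: NewLocalCreator shares a single mutex across
	// every client it constructs, serializing access to the application.
	creator := abciclient.NewLocalCreator(kvstore.NewApplication())

	client, err := creator()
	if err != nil {
		log.Fatal(err)
	}
	if err := client.Start(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Stop() }()

	// An out-of-process application would use NewRemoteCreator instead,
	// e.g. abciclient.NewRemoteCreator("192.168.0.1", "tcp", true), with
	// the address and transport forms described in its doc comment above.
}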
@@ -67,8 +65,8 @@ func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, err } func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Info(req) return app.callback( @@ -100,8 +98,8 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck } func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Query(req) return app.callback( @@ -215,8 +213,8 @@ func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.Respon } func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Info(req) return &res, nil @@ -249,8 +247,8 @@ func (app *localClient) QuerySync( ctx context.Context, req types.RequestQuery, ) (*types.ResponseQuery, error) { - app.mtx.RLock() - defer app.mtx.RUnlock() + app.mtx.Lock() + defer app.mtx.Unlock() res := app.Application.Query(req) return &res, nil diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 6726ce95e..664646e61 100644 --- a/abci/client/mocks/client.go +++ b/abci/client/mocks/client.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" log "github.com/tendermint/tendermint/libs/log" @@ -20,15 +20,15 @@ type Client struct { } // ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) { +func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -66,15 +66,15 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA } // BeginBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) { +func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -112,15 +112,15 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc } // CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 
types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -158,15 +158,15 @@ func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*t } // CommitAsync provides a mock function with given fields: _a0 -func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) { +func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -204,15 +204,15 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) } // DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -250,15 +250,15 @@ func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) } // EchoAsync provides a mock function with given fields: ctx, msg -func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) { +func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { ret := _m.Called(ctx, msg) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, string) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, string) *abciclient.ReqRes); ok { r0 = rf(ctx, msg) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -296,15 +296,15 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho } // EndBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) { +func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } 
} @@ -356,15 +356,15 @@ func (_m *Client) Error() error { } // FlushAsync provides a mock function with given fields: _a0 -func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) { +func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -393,15 +393,15 @@ func (_m *Client) FlushSync(_a0 context.Context) error { } // InfoAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) { +func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -439,15 +439,15 @@ func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.R } // InitChainAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) { +func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -499,15 +499,15 @@ func (_m *Client) IsRunning() bool { } // ListSnapshotsAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) { +func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -545,15 +545,15 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn } // LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) { +func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok 
{ + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -591,15 +591,15 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo } // OfferSnapshotAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) { +func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -670,15 +670,15 @@ func (_m *Client) OnStop() { } // QueryAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) { +func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -751,7 +751,7 @@ func (_m *Client) SetLogger(_a0 log.Logger) { } // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *Client) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *Client) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 3fef8540d..726c554d4 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -1,4 +1,4 @@ -package abcicli +package abciclient import ( "bufio" @@ -13,7 +13,6 @@ import ( "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/libs/timer" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" ) @@ -22,8 +21,6 @@ const ( // reqQueueSize is the max number of queued async requests. // (memory: 256MB max assuming 1MB transactions) reqQueueSize = 256 - // Don't wait longer than... - flushThrottleMS = 20 ) type reqResWithContext struct { @@ -40,10 +37,9 @@ type socketClient struct { mustConnect bool conn net.Conn - reqQueue chan *reqResWithContext - flushTimer *timer.ThrottleTimer + reqQueue chan *reqResWithContext - mtx tmsync.RWMutex + mtx tmsync.Mutex err error reqSent *list.List // list of requests sent, waiting for response resCb func(*types.Request, *types.Response) // called on all requests, if set. 
@@ -57,7 +53,6 @@ var _ Client = (*socketClient)(nil) func NewSocketClient(addr string, mustConnect bool) Client { cli := &socketClient{ reqQueue: make(chan *reqResWithContext, reqQueueSize), - flushTimer: timer.NewThrottleTimer("socketClient", flushThrottleMS), mustConnect: mustConnect, addr: addr, @@ -102,14 +97,13 @@ func (cli *socketClient) OnStop() { cli.conn.Close() } - cli.flushQueue() - cli.flushTimer.Stop() + cli.drainQueue() } // Error returns an error if the client was stopped abruptly. func (cli *socketClient) Error() error { - cli.mtx.RLock() - defer cli.mtx.RUnlock() + cli.mtx.Lock() + defer cli.mtx.Unlock() return cli.err } @@ -119,45 +113,32 @@ func (cli *socketClient) Error() error { // NOTE: callback may get internally generated flush responses. func (cli *socketClient) SetResponseCallback(resCb Callback) { cli.mtx.Lock() - defer cli.mtx.Unlock() cli.resCb = resCb + cli.mtx.Unlock() } //---------------------------------------- func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { - w := bufio.NewWriter(conn) + bw := bufio.NewWriter(conn) for { select { case reqres := <-cli.reqQueue: - // cli.Logger.Debug("Sent request", "requestType", reflect.TypeOf(reqres.Request), "request", reqres.Request) - if reqres.C.Err() != nil { cli.Logger.Debug("Request's context is done", "req", reqres.R, "err", reqres.C.Err()) continue } - cli.willSendReq(reqres.R) - err := types.WriteMessage(reqres.R.Request, w) - if err != nil { + + if err := types.WriteMessage(reqres.R.Request, bw); err != nil { cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return } + if err := bw.Flush(); err != nil { + cli.stopForError(fmt.Errorf("flush buffer: %w", err)) + return + } - // If it's a flush request, flush the current buffer. - if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok { - err = w.Flush() - if err != nil { - cli.stopForError(fmt.Errorf("flush buffer: %w", err)) - return - } - } - case <-cli.flushTimer.Ch: // flush queue - select { - case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}: - default: - // Probably will fill the buffer, or retry later. - } case <-cli.Quit(): return } @@ -492,14 +473,6 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s } } - // Maybe auto-flush, or unset auto-flush - switch req.Value.(type) { - case *types.Request_Flush: - cli.flushTimer.Unset() - default: - cli.flushTimer.Set() - } - return reqres, nil } @@ -537,7 +510,9 @@ func queueErr(e error) error { return fmt.Errorf("can't queue req: %w", e) } -func (cli *socketClient) flushQueue() { +// drainQueue marks as complete and discards all remaining pending requests +// from the queue. +func (cli *socketClient) drainQueue() { cli.mtx.Lock() defer cli.mtx.Unlock() @@ -547,14 +522,17 @@ func (cli *socketClient) flushQueue() { reqres.Done() } - // mark all queued messages as resolved -LOOP: + // Mark all queued messages as resolved. + // + // TODO(creachadair): We can't simply range the channel, because it is never + // closed, and the writer continues to add work. + // See https://github.com/tendermint/tendermint/issues/6996. 
for { select { case reqres := <-cli.reqQueue: reqres.R.Done() default: - break LOOP + return } } } diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index d61d729e1..53ba7b672 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -1,4 +1,4 @@ -package abcicli_test +package abciclient_test import ( "context" @@ -11,7 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/service" @@ -100,7 +100,7 @@ func TestHangingSyncCalls(t *testing.T) { } func setupClientServer(t *testing.T, app types.Application) ( - service.Service, abcicli.Client) { + service.Service, abciclient.Client) { // some port between 20k and 30k port := 20000 + rand.Int31()%10000 addr := fmt.Sprintf("localhost:%d", port) @@ -110,7 +110,7 @@ func setupClientServer(t *testing.T, app types.Application) ( err = s.Start() require.NoError(t, err) - c := abcicli.NewSocketClient(addr, true) + c := abciclient.NewSocketClient(addr, true) err = c.Start() require.NoError(t, err) diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index b9af27e22..9fae6fc05 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -15,7 +15,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" @@ -27,7 +27,7 @@ import ( // client is a global variable so it can be reused by the console var ( - client abcicli.Client + client abciclient.Client logger log.Logger ctx = context.Background() @@ -67,7 +67,7 @@ var RootCmd = &cobra.Command{ if client == nil { var err error - client, err = abcicli.NewClient(flagAddress, flagAbci, false) + client, err = abciclient.NewClient(flagAddress, flagAbci, false) if err != nil { return err } diff --git a/abci/example/example_test.go b/abci/example/example_test.go index fdfc5515e..cde8a15b1 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -17,7 +17,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" @@ -61,7 +61,7 @@ func testStream(t *testing.T, app types.Application) { }) // Connect to the socket - client := abcicli.NewSocketClient(socket, false) + client := abciclient.NewSocketClient(socket, false) client.SetLogger(log.TestingLogger().With("module", "abci-client")) err = client.Start() require.NoError(t, err) diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index a52312a00..9d026bd87 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - abcicli 
"github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" abciserver "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" @@ -229,7 +229,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { } } -func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) { +func makeSocketClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() @@ -241,7 +241,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client, } // Connect to the socket - client := abcicli.NewSocketClient(socket, false) + client := abciclient.NewSocketClient(socket, false) client.SetLogger(logger.With("module", "abci-client")) if err := client.Start(); err != nil { if err = server.Stop(); err != nil { @@ -253,7 +253,7 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client, return client, server, nil } -func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) { +func makeGRPCClientServer(app types.Application, name string) (abciclient.Client, service.Service, error) { // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() @@ -265,7 +265,7 @@ func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, s return nil, nil, err } - client := abcicli.NewGRPCClient(socket, true) + client := abciclient.NewGRPCClient(socket, true) client.SetLogger(logger.With("module", "abci-client")) if err := client.Start(); err != nil { if err := server.Stop(); err != nil { @@ -313,7 +313,7 @@ func TestClientServer(t *testing.T) { runClientTests(t, gclient) } -func runClientTests(t *testing.T, client abcicli.Client) { +func runClientTests(t *testing.T, client abciclient.Client) { // run some tests.... 
key := testKey value := key @@ -325,7 +325,7 @@ func runClientTests(t *testing.T, client abcicli.Client) { testClient(t, client, tx, key, value) } -func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) { +func testClient(t *testing.T, app abciclient.Client, tx []byte, key, value string) { ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 0fcfcadf7..40451baa9 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -11,9 +11,9 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" - pc "github.com/tendermint/tendermint/proto/tendermint/crypto" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) const ( @@ -30,7 +30,7 @@ type PersistentKVStoreApplication struct { // validator set ValUpdates []types.ValidatorUpdate - valAddrToPubKeyMap map[string]pc.PublicKey + valAddrToPubKeyMap map[string]cryptoproto.PublicKey logger log.Logger } @@ -46,7 +46,7 @@ func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication return &PersistentKVStoreApplication{ app: &Application{state: state}, - valAddrToPubKeyMap: make(map[string]pc.PublicKey), + valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey), logger: log.NewNopLogger(), } } @@ -194,8 +194,8 @@ func (app *PersistentKVStoreApplication) Validators() (validators []types.Valida return } -func MakeValSetChangeTx(pubkey pc.PublicKey, power int64) []byte { - pk, err := cryptoenc.PubKeyFromProto(pubkey) +func MakeValSetChangeTx(pubkey cryptoproto.PublicKey, power int64) []byte { + pk, err := encoding.PubKeyFromProto(pubkey) if err != nil { panic(err) } @@ -243,7 +243,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon // add, update, or remove a validator func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx { - pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey) + pubkey, err := encoding.PubKeyFromProto(v.PubKey) if err != nil { panic(fmt.Errorf("can't decode public key: %w", err)) } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 543b444b1..85539645b 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -240,22 +240,15 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types // Pull responses from 'responses' and write them to conn. 
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { - var count int - var bufWriter = bufio.NewWriter(conn) - for { - var res = <-responses - err := types.WriteMessage(res, bufWriter) - if err != nil { + bw := bufio.NewWriter(conn) + for res := range responses { + if err := types.WriteMessage(res, bw); err != nil { closeConn <- fmt.Errorf("error writing message: %w", err) return } - if _, ok := res.Value.(*types.Response_Flush); ok { - err = bufWriter.Flush() - if err != nil { - closeConn <- fmt.Errorf("error flushing write buffer: %w", err) - return - } + if err := bw.Flush(); err != nil { + closeConn <- fmt.Errorf("error flushing write buffer: %w", err) + return } - count++ } } diff --git a/abci/tests/client_server_test.go b/abci/tests/client_server_test.go index 2ef64e66a..62dc6e07e 100644 --- a/abci/tests/client_server_test.go +++ b/abci/tests/client_server_test.go @@ -5,7 +5,7 @@ import ( "github.com/stretchr/testify/assert" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" ) @@ -20,7 +20,7 @@ func TestClientServerNoAddrPrefix(t *testing.T) { err = server.Start() assert.NoError(t, err, "expected no error on server.Start") - client, err := abcicli.NewClient(addr, transport, true) + client, err := abciclient.NewClient(addr, transport, true) assert.NoError(t, err, "expected no error on NewClient") err = client.Start() assert.NoError(t, err, "expected no error on client.Start") diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 10d4a3e58..23adbe80d 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -7,14 +7,14 @@ import ( "context" "fmt" mrand "math/rand" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" ) var ctx = context.Background() -func InitChain(client abcicli.Client) error { +func InitChain(client abciclient.Client) error { total := 10 vals := make([]types.ValidatorUpdate, total) for i := 0; i < total; i++ { @@ -34,7 +34,7 @@ func InitChain(client abcicli.Client) error { return nil } -func Commit(client abcicli.Client, hashExp []byte) error { +func Commit(client abciclient.Client, hashExp []byte) error { res, err := client.CommitSync(ctx) data := res.Data if err != nil { @@ -51,7 +51,7 @@ func Commit(client abcicli.Client, hashExp []byte) error { return nil } -func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { +func DeliverTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { @@ -70,7 +70,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp [] return nil } -func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { +func CheckTx(client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { diff --git a/abci/types/client.go b/abci/types/client.go new file mode 100644 index 000000000..ab1254f4c ---
/dev/null +++ b/abci/types/client.go @@ -0,0 +1 @@ +package types diff --git a/abci/types/pubkey.go b/abci/types/pubkey.go index a0f746992..c188fc8f5 100644 --- a/abci/types/pubkey.go +++ b/abci/types/pubkey.go @@ -4,7 +4,7 @@ import ( fmt "fmt" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/sr25519" ) @@ -12,7 +12,7 @@ import ( func Ed25519ValidatorUpdate(pk []byte, power int64) ValidatorUpdate { pke := ed25519.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } @@ -29,7 +29,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { return Ed25519ValidatorUpdate(pk, power) case secp256k1.KeyType: pke := secp256k1.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } @@ -39,7 +39,7 @@ func UpdateValidator(pk []byte, power int64, keyType string) ValidatorUpdate { } case sr25519.KeyType: pke := sr25519.PubKey(pk) - pkp, err := cryptoenc.PubKeyToProto(pke) + pkp, err := encoding.PubKeyToProto(pke) if err != nil { panic(err) } diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index f99975a75..cb1cc942a 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -11,7 +11,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -65,9 +65,9 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { } home := viper.GetString(cli.HomeFlag) - conf := cfg.DefaultConfig() + conf := config.DefaultConfig() conf = conf.SetRoot(home) - cfg.EnsureRoot(conf.RootDir) + config.EnsureRoot(conf.RootDir) dumpDebugData(outDir, conf, rpc) @@ -79,7 +79,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { return nil } -func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) { +func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) { start := time.Now().UTC() tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index bef534152..3e749e513 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -14,7 +14,7 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -50,9 +50,9 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { } home := viper.GetString(cli.HomeFlag) - conf := cfg.DefaultConfig() + conf := config.DefaultConfig() conf = conf.SetRoot(home) - cfg.EnsureRoot(conf.RootDir) + config.EnsureRoot(conf.RootDir) // Create a temporary directory which will contain all the state dumps and // relevant files and directories that will be compressed into a file. 
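Note on the abci/types/pubkey.go hunk above: the `cryptoenc` alias is dropped in favor of the package's own name, `encoding`. As a quick illustration of how these helpers fit together, here is a minimal, hypothetical sketch (not part of this patch) that builds an ABCI validator update from a fresh ed25519 key:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/crypto/ed25519"
)

func main() {
	// Ed25519ValidatorUpdate wraps the raw key bytes in a proto PublicKey
	// via encoding.PubKeyToProto, as shown in the pubkey.go hunk above.
	pk := ed25519.GenPrivKey().PubKey().Bytes()
	update := types.Ed25519ValidatorUpdate(pk, 10) // voting power of 10
	fmt.Println(update.PubKey, update.Power)
}
```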
diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index 226bfadc7..fa356c488 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -9,7 +9,7 @@ import ( "path" "path/filepath" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) @@ -48,7 +48,7 @@ func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error { // copyWAL copies the Tendermint node's WAL file. It returns an error if the // WAL file cannot be read or copied. -func copyWAL(conf *cfg.Config, dir string) error { +func copyWAL(conf *config.Config, dir string) error { walPath := conf.Consensus.WalFile() walFile := filepath.Base(walPath) diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go index f796f4b7f..d8b493e3c 100644 --- a/cmd/tendermint/commands/gen_node_key.go +++ b/cmd/tendermint/commands/gen_node_key.go @@ -12,11 +12,9 @@ import ( // GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded // NodeKey to the standard output. var GenNodeKeyCmd = &cobra.Command{ - Use: "gen-node-key", - Aliases: []string{"gen_node_key"}, - Short: "Generate a new node key", - RunE: genNodeKey, - PreRun: deprecateSnakeCase, + Use: "gen-node-key", + Short: "Generate a new node key", + RunE: genNodeKey, } func genNodeKey(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go index 09f84b09e..830518ce9 100644 --- a/cmd/tendermint/commands/gen_validator.go +++ b/cmd/tendermint/commands/gen_validator.go @@ -13,11 +13,9 @@ import ( // GenValidatorCmd allows the generation of a keypair for a // validator. var GenValidatorCmd = &cobra.Command{ - Use: "gen-validator", - Aliases: []string{"gen_validator"}, - Short: "Generate new validator keypair", - RunE: genValidator, - PreRun: deprecateSnakeCase, + Use: "gen-validator", + Short: "Generate new validator keypair", + RunE: genValidator, } func init() { diff --git a/cmd/tendermint/commands/inspect.go b/cmd/tendermint/commands/inspect.go new file mode 100644 index 000000000..3cd6ef572 --- /dev/null +++ b/cmd/tendermint/commands/inspect.go @@ -0,0 +1,63 @@ +package commands + +import ( + "context" + "os" + "os/signal" + "syscall" + + "github.com/spf13/cobra" + + "github.com/tendermint/tendermint/internal/inspect" +) + +// InspectCmd is the command for starting an inspect server. +var InspectCmd = &cobra.Command{ + Use: "inspect", + Short: "Run an inspect server for investigating Tendermint state", + Long: ` + inspect runs a subset of Tendermint's RPC endpoints that are useful for debugging + issues with Tendermint. + + When the Tendermint consensus engine detects inconsistent state, it will crash the + tendermint process. Tendermint will not start up while in this inconsistent state. + The inspect command can be used to query the block and state store using Tendermint + RPC calls to debug issues of inconsistent state. + `, + + RunE: runInspect, +} + +func init() { + InspectCmd.Flags(). + String("rpc.laddr", + config.RPC.ListenAddress, "RPC listener address. Port required") + InspectCmd.Flags(). + String("db-backend", + config.DBBackend, "database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb") + InspectCmd.Flags().
+ String("db-dir", config.DBPath, "database directory") +} + +func runInspect(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + c := make(chan os.Signal, 1) + signal.Notify(c, syscall.SIGTERM, syscall.SIGINT) + go func() { + <-c + cancel() + }() + + ins, err := inspect.NewFromConfig(logger, config) + if err != nil { + return err + } + + logger.Info("starting inspect server") + if err := ins.Run(ctx); err != nil { + return err + } + return nil +} diff --git a/cmd/tendermint/commands/key_migrate.go b/cmd/tendermint/commands/key_migrate.go new file mode 100644 index 000000000..739af4a7d --- /dev/null +++ b/cmd/tendermint/commands/key_migrate.go @@ -0,0 +1,64 @@ +package commands + +import ( + "context" + "fmt" + + "github.com/spf13/cobra" + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/scripts/keymigrate" +) + +func MakeKeyMigrateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "key-migrate", + Short: "Run Database key migration", + RunE: func(cmd *cobra.Command, args []string) error { + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + contexts := []string{ + // this is ordered to put the + // (presumably) biggest/most important + // subsets first. + "blockstore", + "state", + "peerstore", + "tx_index", + "evidence", + "light", + } + + for idx, dbctx := range contexts { + logger.Info("beginning a key migration", + "dbctx", dbctx, + "num", idx+1, + "total", len(contexts), + ) + + db, err := cfg.DefaultDBProvider(&cfg.DBContext{ + ID: dbctx, + Config: config, + }) + + if err != nil { + return fmt.Errorf("constructing database handle: %w", err) + } + + if err = keymigrate.Migrate(ctx, db); err != nil { + return fmt.Errorf("running migration for context %q: %w", + dbctx, err) + } + } + + logger.Info("completed database migration successfully") + + return nil + }, + } + + // allow database info to be overridden via cli + addDBFlags(cmd) + + return cmd +} diff --git a/cmd/tendermint/commands/light.go b/cmd/tendermint/commands/light.go index 5e7446e51..f4c9a21da 100644 --- a/cmd/tendermint/commands/light.go +++ b/cmd/tendermint/commands/light.go @@ -11,7 +11,6 @@ import ( "time" "github.com/spf13/cobra" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" diff --git a/cmd/tendermint/commands/probe_upnp.go b/cmd/tendermint/commands/probe_upnp.go index 4471024f9..4c71e099a 100644 --- a/cmd/tendermint/commands/probe_upnp.go +++ b/cmd/tendermint/commands/probe_upnp.go @@ -11,11 +11,9 @@ import ( // ProbeUpnpCmd adds capabilities to test the UPnP functionality. 
var ProbeUpnpCmd = &cobra.Command{ - Use: "probe-upnp", - Aliases: []string{"probe_upnp"}, - Short: "Test UPnP functionality", - RunE: probeUpnp, - PreRun: deprecateSnakeCase, + Use: "probe-upnp", + Short: "Test UPnP functionality", + RunE: probeUpnp, } func probeUpnp(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index ddc585c1f..58f11657b 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -6,17 +6,17 @@ import ( "strings" "github.com/spf13/cobra" - tmdb "github.com/tendermint/tm-db" + dbm "github.com/tendermint/tm-db" abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/progressbar" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/indexer/sink/kv" - "github.com/tendermint/tendermint/state/indexer/sink/psql" - "github.com/tendermint/tendermint/store" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" + "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -29,11 +29,12 @@ var ReIndexEventCmd = &cobra.Command{ Use: "reindex-event", Short: "reindex events to the event store backends", Long: ` - reindex-event is an offline tooling to re-index block and tx events to the eventsinks, - you can run this command when the event store backend dropped/disconnected or you want to replace the backend. - The default start-height is 0, meaning the tooling will start reindex from the base block height(inclusive); and the - default end-height is 0, meaning the tooling will reindex until the latest block height(inclusive). User can omits - either or both arguments. +reindex-event is an offline tool to re-index block and tx events to the event sinks. +You can run this command when the event store backend was dropped or disconnected, or +when you want to replace the backend. The default start-height is 0, meaning the tool +will start reindexing from the base block height (inclusive); the default end-height is +0, meaning the tool will reindex until the latest block height (inclusive). Either or +both arguments can be omitted.
`, Example: ` tendermint reindex-event @@ -106,7 +107,7 @@ func loadEventSinks(cfg *tmcfg.Config) ([]indexer.EventSink, error) { if conn == "" { return nil, errors.New("the psql connection settings cannot be empty") } - es, _, err := psql.NewEventSink(conn, chainID) + es, err := psql.NewEventSink(conn, chainID) if err != nil { return nil, err } @@ -129,17 +130,17 @@ } func loadStateAndBlockStore(cfg *tmcfg.Config) (*store.BlockStore, state.Store, error) { - dbType := tmdb.BackendType(cfg.DBBackend) + dbType := dbm.BackendType(cfg.DBBackend) // Get BlockStore - blockStoreDB, err := tmdb.NewDB("blockstore", dbType, cfg.DBDir()) + blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir()) if err != nil { return nil, nil, err } blockStore := store.NewBlockStore(blockStoreDB) // Get StateStore - stateDB, err := tmdb.NewDB("state", dbType, cfg.DBDir()) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) if err != nil { return nil, nil, err } @@ -221,14 +222,15 @@ func checkValidHeight(bs state.BlockStore) error { } if startHeight < base { - return fmt.Errorf("%s (requested start height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, startHeight, base) + return fmt.Errorf("%s (requested start height: %d, base height: %d)", + coretypes.ErrHeightNotAvailable, startHeight, base) } height := bs.Height() if startHeight > height { return fmt.Errorf( - "%s (requested start height: %d, store height: %d)", ctypes.ErrHeightNotAvailable, startHeight, height) + "%s (requested start height: %d, store height: %d)", coretypes.ErrHeightNotAvailable, startHeight, height) } if endHeight == 0 || endHeight > height { @@ -238,13 +240,13 @@ if endHeight < base { return fmt.Errorf( - "%s (requested end height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, endHeight, base) + "%s (requested end height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, endHeight, base) } if endHeight < startHeight { return fmt.Errorf( "%s (requested the end height: %d is less than the start height: %d)", - ctypes.ErrInvalidRequest, startHeight, endHeight) + coretypes.ErrInvalidRequest, endHeight, startHeight) } return nil diff --git a/cmd/tendermint/commands/reindex_event_test.go b/cmd/tendermint/commands/reindex_event_test.go index 5d9459f5a..452a6b2a8 100644 --- a/cmd/tendermint/commands/reindex_event_test.go +++ b/cmd/tendermint/commands/reindex_event_test.go @@ -11,9 +11,9 @@ import ( abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/mocks" prototmstate "github.com/tendermint/tendermint/proto/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" ) diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go index 6e736bca2..e92274042 100644 --- a/cmd/tendermint/commands/replay.go +++ b/cmd/tendermint/commands/replay.go @@ -17,11 +17,9 @@ var ReplayCmd = &cobra.Command{ // ReplayConsoleCmd allows replaying of messages from the WAL in a // console.
var ReplayConsoleCmd = &cobra.Command{ - Use: "replay-console", - Aliases: []string{"replay_console"}, - Short: "Replay messages from WAL in a console", + Use: "replay-console", + Short: "Replay messages from WAL in a console", Run: func(cmd *cobra.Command, args []string) { consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) }, - PreRun: deprecateSnakeCase, } diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 046780ef1..5f3e54700 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -14,11 +14,9 @@ import ( // ResetAllCmd removes the database of this Tendermint core // instance. var ResetAllCmd = &cobra.Command{ - Use: "unsafe-reset-all", - Aliases: []string{"unsafe_reset_all"}, - Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", - RunE: resetAll, - PreRun: deprecateSnakeCase, + Use: "unsafe-reset-all", + Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", + RunE: resetAll, } var keepAddrBook bool @@ -31,17 +29,15 @@ func init() { // ResetPrivValidatorCmd resets the private validator files. var ResetPrivValidatorCmd = &cobra.Command{ - Use: "unsafe-reset-priv-validator", - Aliases: []string{"unsafe_reset_priv_validator"}, - Short: "(unsafe) Reset this node's validator to genesis state", - RunE: resetPrivValidator, - PreRun: deprecateSnakeCase, + Use: "unsafe-reset-priv-validator", + Short: "(unsafe) Reset this node's validator to genesis state", + RunE: resetPrivValidator, } // XXX: this is totally unsafe. // it's only suitable for testnets. func resetAll(cmd *cobra.Command, args []string) error { - return ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidator.KeyFile(), + return ResetAll(config.DBDir(), config.PrivValidator.KeyFile(), config.PrivValidator.StateFile(), logger) } @@ -53,12 +49,7 @@ func resetPrivValidator(cmd *cobra.Command, args []string) error { // ResetAll removes address book files plus all data, and resets the privValidator data. // Exported so other CLI tools can use it.
-func ResetAll(dbDir, addrBookFile, privValKeyFile, privValStateFile string, logger log.Logger) error { - if keepAddrBook { - logger.Info("The address book remains intact") - } else { - removeAddrBook(addrBookFile, logger) - } +func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger) error { if err := os.RemoveAll(dbDir); err == nil { logger.Info("Removed all blockchain history", "dir", dbDir) } else { @@ -91,11 +82,3 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err } return nil } - -func removeAddrBook(addrBookFile string, logger log.Logger) { - if err := os.Remove(addrBookFile); err == nil { - logger.Info("Removed existing address book", "file", addrBookFile) - } else if !os.IsNotExist(err) { - logger.Info("Error removing address book", "file", addrBookFile, "err", err) - } -} diff --git a/cmd/tendermint/commands/rollback.go b/cmd/tendermint/commands/rollback.go new file mode 100644 index 000000000..5aff232be --- /dev/null +++ b/cmd/tendermint/commands/rollback.go @@ -0,0 +1,46 @@ +package commands + +import ( + "fmt" + + "github.com/spf13/cobra" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/state" +) + +var RollbackStateCmd = &cobra.Command{ + Use: "rollback", + Short: "rollback tendermint state by one height", + Long: ` +A state rollback is performed to recover from an incorrect application state transition, +when Tendermint has persisted an incorrect app hash and is thus unable to make +progress. Rollback overwrites a state at height n with the state at height n - 1. +The application should also roll back to height n - 1. No blocks are removed, so upon +restarting Tendermint the transactions in block n will be re-executed against the +application. +`, + RunE: func(cmd *cobra.Command, args []string) error { + height, hash, err := RollbackState(config) + if err != nil { + return fmt.Errorf("failed to rollback state: %w", err) + } + + fmt.Printf("Rolled back state to height %d and hash %v", height, hash) + return nil + }, +} + +// RollbackState takes the state at the current height n and overwrites it with the state +// at height n - 1. Note state here refers to tendermint state not application state. +// Returns the latest state height and app hash alongside an error if there was one. +func RollbackState(config *cfg.Config) (int64, []byte, error) { + // use the parsed config to load the block and state store + blockStore, stateStore, err := loadStateAndBlockStore(config) + if err != nil { + return -1, nil, err + } + + // rollback the last state + return state.Rollback(blockStore, stateStore) +} diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 02f260de5..2289ae363 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -2,7 +2,6 @@ package commands import ( "fmt" - "strings" "time" "github.com/spf13/cobra" @@ -65,10 +64,3 @@ var RootCmd = &cobra.Command{ return nil }, } - -// deprecateSnakeCase is a util function for 0.34.1. 
Should be removed in 0.35 -func deprecateSnakeCase(cmd *cobra.Command, args []string) { - if strings.Contains(cmd.CalledAs(), "_") { - fmt.Println("Deprecated: snake_case commands will be replaced by hyphen-case commands in the next major release") - } -} diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go index 1c68fcffe..a5fa72ed5 100644 --- a/cmd/tendermint/commands/run_node.go +++ b/cmd/tendermint/commands/run_node.go @@ -3,6 +3,8 @@ package commands import ( "bytes" "crypto/sha256" + "errors" + "flag" "fmt" "io" "os" @@ -33,7 +35,22 @@ func AddNodeFlags(cmd *cobra.Command) { "socket address to listen on for connections from external priv-validator process") // node flags - cmd.Flags().Bool("fast-sync", config.FastSyncMode, "fast blockchain syncing") + cmd.Flags().Bool("blocksync.enable", config.BlockSync.Enable, "enable fast blockchain syncing") + + // TODO (https://github.com/tendermint/tendermint/issues/6908): remove this check after the v0.35 release cycle + // This check was added to give users an upgrade prompt to use the new flag for syncing. + // + // The pflag package does not have a native way to print a deprecation warning + // and return an error. This logic was added to print a deprecation message to the user + // and then crash if the user attempts to use the old --fast-sync flag. + fs := flag.NewFlagSet("", flag.ExitOnError) + fs.Func("fast-sync", "deprecated", + func(string) error { + return errors.New("--fast-sync has been deprecated, please use --blocksync.enable") + }) + cmd.Flags().AddGoFlagSet(fs) + + cmd.Flags().MarkHidden("fast-sync") //nolint:errcheck cmd.Flags().BytesHexVar( &genesisHash, "genesis-hash", @@ -48,15 +65,11 @@ func AddNodeFlags(cmd *cobra.Command) { "proxy-app", config.ProxyApp, "proxy app address, or one of: 'kvstore',"+ - " 'persistent_kvstore' or 'noop' for local testing.") + " 'persistent_kvstore', 'e2e' or 'noop' for local testing.") cmd.Flags().String("abci", config.ABCI, "specify abci transport (socket | grpc)") // rpc flags cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required") - cmd.Flags().String( - "rpc.grpc-laddr", - config.RPC.GRPCListenAddress, - "GRPC listen address (BroadcastTx only). Port required") cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "enable unsafe rpc methods") cmd.Flags().String("rpc.pprof-laddr", config.RPC.PprofListenAddress, "pprof listen address (https://golang.org/pkg/net/http/pprof)") @@ -67,8 +80,6 @@ func AddNodeFlags(cmd *cobra.Command) { "node listen address.
(0.0.0.0:0 means any interface, any port)") cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers") - cmd.Flags().String("p2p.unconditional-peer-ids", - config.P2P.UnconditionalPeerIDs, "comma-delimited IDs of unconditional peers") cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding") cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange") cmd.Flags().String("p2p.private-peer-ids", config.P2P.PrivatePeerIDs, "comma-delimited private peer IDs") @@ -83,7 +94,10 @@ func AddNodeFlags(cmd *cobra.Command) { config.Consensus.CreateEmptyBlocksInterval.String(), "the possible interval between empty blocks") - // db flags + addDBFlags(cmd) +} + +func addDBFlags(cmd *cobra.Command) { cmd.Flags().String( "db-backend", config.DBBackend, @@ -155,7 +169,7 @@ func checkGenesisHash(config *cfg.Config) error { // Compare with the flag. if !bytes.Equal(genesisHash, actualHash) { return fmt.Errorf( - "--genesis_hash=%X does not match %s hash: %X", + "--genesis-hash=%X does not match %s hash: %X", genesisHash, config.GenesisFile(), actualHash) } diff --git a/cmd/tendermint/commands/show_node_id.go b/cmd/tendermint/commands/show_node_id.go index 7a5814c3b..488f4c322 100644 --- a/cmd/tendermint/commands/show_node_id.go +++ b/cmd/tendermint/commands/show_node_id.go @@ -8,11 +8,9 @@ import ( // ShowNodeIDCmd dumps node's ID to the standard output. var ShowNodeIDCmd = &cobra.Command{ - Use: "show-node-id", - Aliases: []string{"show_node_id"}, - Short: "Show this node's ID", - RunE: showNodeID, - PreRun: deprecateSnakeCase, + Use: "show-node-id", + Short: "Show this node's ID", + RunE: showNodeID, } func showNodeID(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go index 240ed943f..47b372c61 100644 --- a/cmd/tendermint/commands/show_validator.go +++ b/cmd/tendermint/commands/show_validator.go @@ -16,11 +16,9 @@ import ( // ShowValidatorCmd adds capabilities for showing the validator info. 
var ShowValidatorCmd = &cobra.Command{ - Use: "show-validator", - Aliases: []string{"show_validator"}, - Short: "Show this node's validator info", - RunE: showValidator, - PreRun: deprecateSnakeCase, + Use: "show-validator", + Short: "Show this node's validator info", + RunE: showValidator, } func showValidator(cmd *cobra.Command, args []string) error { diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index a7307b38f..ef46f5428 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -226,7 +226,6 @@ func testnetFiles(cmd *cobra.Command, args []string) error { for i := 0; i < nValidators+nNonValidators; i++ { nodeDir := filepath.Join(outputDir, fmt.Sprintf("%s%d", nodeDirPrefix, i)) config.SetRoot(nodeDir) - config.P2P.AddrBookStrict = false config.P2P.AllowDuplicateIP = true if populatePersistentPeers { persistentPeersWithoutSelf := make([]string, 0) diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index b40624cc3..52a00e4c0 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -6,9 +6,9 @@ import ( cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" "github.com/tendermint/tendermint/cmd/tendermint/commands/debug" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" - nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/node" ) func main() { @@ -28,6 +28,9 @@ func main() { cmd.ShowNodeIDCmd, cmd.GenNodeKeyCmd, cmd.VersionCmd, + cmd.InspectCmd, + cmd.RollbackStateCmd, + cmd.MakeKeyMigrateCommand(), debug.DebugCmd, cli.NewCompletionCmd(rootCmd, true), ) @@ -40,12 +43,12 @@ func main() { // * Provide their own DB implementation // can copy this file and use something other than the // node.NewDefault function - nodeFunc := nm.NewDefault + nodeFunc := node.NewDefault // Create & start node rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc)) - cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir))) + cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", config.DefaultTendermintDir))) if err := cmd.Execute(); err != nil { panic(err) } diff --git a/config/config.go b/config/config.go index 7d19616aa..a9b2576fd 100644 --- a/config/config.go +++ b/config/config.go @@ -30,7 +30,6 @@ const ( ModeSeed = "seed" BlockSyncV0 = "v0" - BlockSyncV2 = "v2" MempoolV0 = "v0" MempoolV1 = "v1" @@ -54,16 +53,14 @@ var ( defaultPrivValKeyName = "priv_validator_key.json" defaultPrivValStateName = "priv_validator_state.json" - defaultNodeKeyName = "node_key.json" - defaultAddrBookName = "addrbook.json" + defaultNodeKeyName = "node_key.json" defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName) defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName) defaultPrivValKeyPath = filepath.Join(defaultConfigDir, defaultPrivValKeyName) defaultPrivValStatePath = filepath.Join(defaultDataDir, defaultPrivValStateName) - defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) - defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName) + defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) ) // Config defines the top level configuration for a Tendermint node @@ -76,7 +73,7 @@ type Config struct { P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` StateSync *StateSyncConfig `mapstructure:"statesync"` - BlockSync 
*BlockSyncConfig `mapstructure:"fastsync"` + BlockSync *BlockSyncConfig `mapstructure:"blocksync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx-index"` Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` @@ -142,9 +139,6 @@ func (cfg *Config) ValidateBasic() error { if err := cfg.RPC.ValidateBasic(); err != nil { return fmt.Errorf("error in [rpc] section: %w", err) } - if err := cfg.P2P.ValidateBasic(); err != nil { - return fmt.Errorf("error in [p2p] section: %w", err) - } if err := cfg.Mempool.ValidateBasic(); err != nil { return fmt.Errorf("error in [mempool] section: %w", err) } @@ -152,7 +146,7 @@ func (cfg *Config) ValidateBasic() error { return fmt.Errorf("error in [statesync] section: %w", err) } if err := cfg.BlockSync.ValidateBasic(); err != nil { - return fmt.Errorf("error in [fastsync] section: %w", err) + return fmt.Errorf("error in [blocksync] section: %w", err) } if err := cfg.Consensus.ValidateBasic(); err != nil { return fmt.Errorf("error in [consensus] section: %w", err) @@ -194,12 +188,6 @@ type BaseConfig struct { //nolint: maligned // - No priv_validator_key.json, priv_validator_state.json Mode string `mapstructure:"mode"` - // If this node is many blocks behind the tip of the chain, FastSync - // allows them to catchup quickly by downloading blocks in parallel - // and verifying their commits - // TODO: This should be moved to the blocksync config - FastSyncMode bool `mapstructure:"fast-sync"` - // Database backend: goleveldb | cleveldb | boltdb | rocksdb // * goleveldb (github.com/syndtr/goleveldb - most popular implementation) // - pure go @@ -242,23 +230,24 @@ type BaseConfig struct { //nolint: maligned // If true, query the ABCI app on connecting to a new peer // so the app can decide if we should keep the connection or not FilterPeers bool `mapstructure:"filter-peers"` // false + + Other map[string]interface{} `mapstructure:",remain"` } // DefaultBaseConfig returns a default base configuration for a Tendermint node func DefaultBaseConfig() BaseConfig { return BaseConfig{ - Genesis: defaultGenesisJSONPath, - NodeKey: defaultNodeKeyPath, - Mode: defaultMode, - Moniker: defaultMoniker, - ProxyApp: "tcp://127.0.0.1:26658", - ABCI: "socket", - LogLevel: DefaultLogLevel, - LogFormat: log.LogFormatPlain, - FastSyncMode: true, - FilterPeers: false, - DBBackend: "goleveldb", - DBPath: "data", + Genesis: defaultGenesisJSONPath, + NodeKey: defaultNodeKeyPath, + Mode: defaultMode, + Moniker: defaultMoniker, + ProxyApp: "tcp://127.0.0.1:26658", + ABCI: "socket", + LogLevel: DefaultLogLevel, + LogFormat: log.LogFormatPlain, + FilterPeers: false, + DBBackend: "goleveldb", + DBPath: "data", } } @@ -268,7 +257,6 @@ func TestBaseConfig() BaseConfig { cfg.chainID = "tendermint_test" cfg.Mode = ModeValidator cfg.ProxyApp = "kvstore" - cfg.FastSyncMode = false cfg.DBBackend = "memdb" return cfg } @@ -345,6 +333,28 @@ func (cfg BaseConfig) ValidateBasic() error { return fmt.Errorf("unknown mode: %v", cfg.Mode) } + // TODO (https://github.com/tendermint/tendermint/issues/6908) remove this check after the v0.35 release cycle. + // This check was added to give users an upgrade prompt to use the new + // configuration option in v0.35. In future release cycles they should no longer + // be using this configuration parameter so the check can be removed. + // The cfg.Other field can likely be removed at the same time if it is not referenced + // elsewhere as it was added to service this check. 
+ if fs, ok := cfg.Other["fastsync"]; ok { + if _, ok := fs.(map[string]interface{}); ok { + return fmt.Errorf("a configuration section named 'fastsync' was found in the " + + "configuration file. The 'fastsync' section has been renamed to " + + "'blocksync'. Please update the 'fastsync' field in your configuration file to 'blocksync'") + } + } + if fs, ok := cfg.Other["fast-sync"]; ok { + if fs != "" { + return fmt.Errorf("a parameter named 'fast-sync' was found in the " + + "configuration file. The parameter to enable or disable quickly syncing with a blockchain " + + "has moved to the [blocksync] section of the configuration file as blocksync.enable. " + + "Please move the 'fast-sync' field in your configuration file to 'blocksync.enable'") + } + } + return nil } @@ -445,24 +455,10 @@ type RPCConfig struct { // A list of non simple headers the client is allowed to use with cross-domain requests. CORSAllowedHeaders []string `mapstructure:"cors-allowed-headers"` - // TCP or UNIX socket address for the gRPC server to listen on - // NOTE: This server only supports /broadcast_tx_commit - // Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. - GRPCListenAddress string `mapstructure:"grpc-laddr"` - - // Maximum number of simultaneous connections. - // Does not include RPC (HTTP&WebSocket) connections. See max-open-connections - // If you want to accept a larger number than the default, make sure - // you increase your OS limits. - // 0 - unlimited. - // Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. - GRPCMaxOpenConnections int `mapstructure:"grpc-max-open-connections"` - // Activate unsafe RPC commands like /dial-persistent-peers and /unsafe-flush-mempool Unsafe bool `mapstructure:"unsafe"` // Maximum number of simultaneous connections (including WebSocket). - // Does not include gRPC connections. See grpc-max-open-connections // If you want to accept a larger number than the default, make sure // you increase your OS limits. // 0 - unlimited. @@ -476,7 +472,7 @@ type RPCConfig struct { MaxSubscriptionClients int `mapstructure:"max-subscription-clients"` // Maximum number of unique queries a given client can /subscribe to - // If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set + // If you're using a Local RPC client and /broadcast_tx_commit, set this // to the estimated maximum number of broadcast_tx_commit calls per block.
MaxSubscriptionsPerClient int `mapstructure:"max-subscriptions-per-client"` @@ -517,12 +513,10 @@ type RPCConfig struct { // DefaultRPCConfig returns a default configuration for the RPC server func DefaultRPCConfig() *RPCConfig { return &RPCConfig{ - ListenAddress: "tcp://127.0.0.1:26657", - CORSAllowedOrigins: []string{}, - CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, - CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, - GRPCListenAddress: "", - GRPCMaxOpenConnections: 900, + ListenAddress: "tcp://127.0.0.1:26657", + CORSAllowedOrigins: []string{}, + CORSAllowedMethods: []string{http.MethodHead, http.MethodGet, http.MethodPost}, + CORSAllowedHeaders: []string{"Origin", "Accept", "Content-Type", "X-Requested-With", "X-Server-Time"}, Unsafe: false, MaxOpenConnections: 900, @@ -543,7 +537,6 @@ func DefaultRPCConfig() *RPCConfig { func TestRPCConfig() *RPCConfig { cfg := DefaultRPCConfig() cfg.ListenAddress = "tcp://127.0.0.1:36657" - cfg.GRPCListenAddress = "tcp://127.0.0.1:36658" cfg.Unsafe = true return cfg } @@ -551,9 +544,6 @@ func TestRPCConfig() *RPCConfig { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. func (cfg *RPCConfig) ValidateBasic() error { - if cfg.GRPCMaxOpenConnections < 0 { - return errors.New("grpc-max-open-connections can't be negative") - } if cfg.MaxOpenConnections < 0 { return errors.New("max-open-connections can't be negative") } @@ -631,25 +621,6 @@ type P2PConfig struct { //nolint: maligned // UPNP port forwarding UPNP bool `mapstructure:"upnp"` - // Path to address book - AddrBook string `mapstructure:"addr-book-file"` - - // Set true for strict address routability rules - // Set false for private or local networks - AddrBookStrict bool `mapstructure:"addr-book-strict"` - - // Maximum number of inbound peers - // - // TODO: Remove once p2p refactor is complete in favor of MaxConnections. - // ref: https://github.com/tendermint/tendermint/issues/5670 - MaxNumInboundPeers int `mapstructure:"max-num-inbound-peers"` - - // Maximum number of outbound peers to connect to, excluding persistent peers. - // - // TODO: Remove once p2p refactor is complete in favor of MaxConnections. - // ref: https://github.com/tendermint/tendermint/issues/5670 - MaxNumOutboundPeers int `mapstructure:"max-num-outbound-peers"` - // MaxConnections defines the maximum number of connected peers (inbound and // outbound). MaxConnections uint16 `mapstructure:"max-connections"` @@ -658,24 +629,6 @@ type P2PConfig struct { //nolint: maligned // attempts per IP address. 
MaxIncomingConnectionAttempts uint `mapstructure:"max-incoming-connection-attempts"` - // List of node IDs, to which a connection will be (re)established ignoring any existing limits - UnconditionalPeerIDs string `mapstructure:"unconditional-peer-ids"` - - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration `mapstructure:"persistent-peers-max-dial-period"` - - // Time to wait before flushing messages out on the connection - FlushThrottleTimeout time.Duration `mapstructure:"flush-throttle-timeout"` - - // Maximum size of a message packet payload, in bytes - MaxPacketMsgPayloadSize int `mapstructure:"max-packet-msg-payload-size"` - - // Rate at which packets can be sent, in bytes/second - SendRate int64 `mapstructure:"send-rate"` - - // Rate at which packets can be received, in bytes/second - RecvRate int64 `mapstructure:"recv-rate"` - // Set true to enable the peer-exchange reactor PexReactor bool `mapstructure:"pex"` @@ -694,13 +647,9 @@ type P2PConfig struct { //nolint: maligned // Force dial to fail TestDialFail bool `mapstructure:"test-dial-fail"` - // DisableLegacy is used mostly for testing to enable or disable the legacy - // P2P stack. - DisableLegacy bool `mapstructure:"disable-legacy"` - // Makes it possible to configure which queue backend the p2p - // layer uses. Options are: "fifo", "priority" and "wdrr", - // with the default being "fifo". + // layer uses. Options are: "fifo" and "priority", + // with the default being "priority". QueueType string `mapstructure:"queue-type"` } @@ -710,28 +659,14 @@ func DefaultP2PConfig() *P2PConfig { ListenAddress: "tcp://0.0.0.0:26656", ExternalAddress: "", UPNP: false, - AddrBook: defaultAddrBookPath, - AddrBookStrict: true, - MaxNumInboundPeers: 40, - MaxNumOutboundPeers: 10, MaxConnections: 64, MaxIncomingConnectionAttempts: 100, - PersistentPeersMaxDialPeriod: 0 * time.Second, - FlushThrottleTimeout: 100 * time.Millisecond, - // The MTU (Maximum Transmission Unit) for Ethernet is 1500 bytes. - // The IP header and the TCP header take up 20 bytes each at least (unless - // optional header fields are used) and thus the max for (non-Jumbo frame) - // Ethernet is 1500 - 20 -20 = 1460 - // Source: https://stackoverflow.com/a/3074427/820520 - MaxPacketMsgPayloadSize: 1400, - SendRate: 5120000, // 5 mB/s - RecvRate: 5120000, // 5 mB/s - PexReactor: true, - AllowDuplicateIP: false, - HandshakeTimeout: 20 * time.Second, - DialTimeout: 3 * time.Second, - TestDialFail: false, - QueueType: "priority", + PexReactor: true, + AllowDuplicateIP: false, + HandshakeTimeout: 20 * time.Second, + DialTimeout: 3 * time.Second, + TestDialFail: false, + QueueType: "priority", } } @@ -739,43 +674,10 @@ func DefaultP2PConfig() *P2PConfig { func TestP2PConfig() *P2PConfig { cfg := DefaultP2PConfig() cfg.ListenAddress = "tcp://127.0.0.1:36656" - cfg.FlushThrottleTimeout = 10 * time.Millisecond cfg.AllowDuplicateIP = true return cfg } -// AddrBookFile returns the full path to the address book -func (cfg *P2PConfig) AddrBookFile() string { - return rootify(cfg.AddrBook, cfg.RootDir) -} - -// ValidateBasic performs basic validation (checking param bounds, etc.) and -// returns an error if any check fails. 
-func (cfg *P2PConfig) ValidateBasic() error { - if cfg.MaxNumInboundPeers < 0 { - return errors.New("max-num-inbound-peers can't be negative") - } - if cfg.MaxNumOutboundPeers < 0 { - return errors.New("max-num-outbound-peers can't be negative") - } - if cfg.FlushThrottleTimeout < 0 { - return errors.New("flush-throttle-timeout can't be negative") - } - if cfg.PersistentPeersMaxDialPeriod < 0 { - return errors.New("persistent-peers-max-dial-period can't be negative") - } - if cfg.MaxPacketMsgPayloadSize < 0 { - return errors.New("max-packet-msg-payload-size can't be negative") - } - if cfg.SendRate < 0 { - return errors.New("send-rate can't be negative") - } - if cfg.RecvRate < 0 { - return errors.New("recv-rate can't be negative") - } - return nil -} - //----------------------------------------------------------------------------- // MempoolConfig @@ -882,15 +784,46 @@ func (cfg *MempoolConfig) ValidateBasic() error { // StateSyncConfig defines the configuration for the Tendermint state sync service type StateSyncConfig struct { - Enable bool `mapstructure:"enable"` - TempDir string `mapstructure:"temp-dir"` - RPCServers []string `mapstructure:"rpc-servers"` - TrustPeriod time.Duration `mapstructure:"trust-period"` - TrustHeight int64 `mapstructure:"trust-height"` - TrustHash string `mapstructure:"trust-hash"` - DiscoveryTime time.Duration `mapstructure:"discovery-time"` + // State sync rapidly bootstraps a new node by discovering, fetching, and restoring a + // state machine snapshot from peers instead of fetching and replaying historical + // blocks. Requires some peers in the network to take and serve state machine + // snapshots. State sync is not attempted if the node has any local state + // (LastBlockHeight > 0). The node will have a truncated block history, starting from + // the height of the snapshot. + Enable bool `mapstructure:"enable"` + + // State sync uses light client verification to verify state. This can be done either + // through the P2P layer or the RPC layer. Set this to true to use the P2P layer. If + // false (default), the RPC layer will be used. + UseP2P bool `mapstructure:"use-p2p"` + + // If using RPC, at least two addresses need to be provided. They should be compatible + // with net.Dial, for example: "host.example.com:2125". + RPCServers []string `mapstructure:"rpc-servers"` + + // The hash and height of a trusted block. Must be within the trust-period. + TrustHeight int64 `mapstructure:"trust-height"` + TrustHash string `mapstructure:"trust-hash"` + + // The trust period should be set so that Tendermint can detect and gossip + // misbehavior before it is considered expired. For chains based on the Cosmos SDK, + // one day less than the unbonding period should suffice. + TrustPeriod time.Duration `mapstructure:"trust-period"` + + // Time to spend discovering snapshots before initiating a restore. + DiscoveryTime time.Duration `mapstructure:"discovery-time"` + + // Temporary directory for state sync snapshot chunks, defaults to os.TempDir(). + // The synchronizer will create a new, randomly named directory within this directory + // and remove it when the sync is complete. + TempDir string `mapstructure:"temp-dir"` + + // The timeout duration before re-requesting a chunk, possibly from a different + // peer (default: 15 seconds). ChunkRequestTimeout time.Duration `mapstructure:"chunk-request-timeout"` - Fetchers int32 `mapstructure:"fetchers"` + + // The number of concurrent chunk and block fetchers to run (default: 4). 
+ Fetchers int32 `mapstructure:"fetchers"` } func (cfg *StateSyncConfig) TrustHashBytes() []byte { @@ -919,49 +852,51 @@ func TestStateSyncConfig() *StateSyncConfig { // ValidateBasic performs basic validation. func (cfg *StateSyncConfig) ValidateBasic() error { - if cfg.Enable { - if len(cfg.RPCServers) == 0 { - return errors.New("rpc-servers is required") - } + if !cfg.Enable { + return nil + } + // If we're not using the P2P stack then we need to validate the + // RPCServers + if !cfg.UseP2P { if len(cfg.RPCServers) < 2 { - return errors.New("at least two rpc-servers entries is required") + return errors.New("at least two rpc-servers must be specified") } for _, server := range cfg.RPCServers { - if len(server) == 0 { + if server == "" { return errors.New("found empty rpc-servers entry") } } + } - if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second { - return errors.New("discovery time must be 0s or greater than five seconds") - } + if cfg.DiscoveryTime != 0 && cfg.DiscoveryTime < 5*time.Second { + return errors.New("discovery time must be 0s or greater than five seconds") + } - if cfg.TrustPeriod <= 0 { - return errors.New("trusted-period is required") - } + if cfg.TrustPeriod <= 0 { + return errors.New("trusted-period is required") + } - if cfg.TrustHeight <= 0 { - return errors.New("trusted-height is required") - } + if cfg.TrustHeight <= 0 { + return errors.New("trusted-height is required") + } - if len(cfg.TrustHash) == 0 { - return errors.New("trusted-hash is required") - } + if len(cfg.TrustHash) == 0 { + return errors.New("trusted-hash is required") + } - _, err := hex.DecodeString(cfg.TrustHash) - if err != nil { - return fmt.Errorf("invalid trusted-hash: %w", err) - } + _, err := hex.DecodeString(cfg.TrustHash) + if err != nil { + return fmt.Errorf("invalid trusted-hash: %w", err) + } - if cfg.ChunkRequestTimeout < 5*time.Second { - return errors.New("chunk-request-timeout must be at least 5 seconds") - } + if cfg.ChunkRequestTimeout < 5*time.Second { + return errors.New("chunk-request-timeout must be at least 5 seconds") + } - if cfg.Fetchers <= 0 { - return errors.New("fetchers is required") - } + if cfg.Fetchers <= 0 { + return errors.New("fetchers is required") } return nil @@ -970,14 +905,17 @@ func (cfg *StateSyncConfig) ValidateBasic() error { //----------------------------------------------------------------------------- // BlockSyncConfig (formerly known as FastSync) defines the configuration for the Tendermint block sync service +// If this node is many blocks behind the tip of the chain, BlockSync +// allows them to catchup quickly by downloading blocks in parallel +// and verifying their commits. type BlockSyncConfig struct { - Version string `mapstructure:"version"` + Enable bool `mapstructure:"enable"` } // DefaultBlockSyncConfig returns a default configuration for the block sync service func DefaultBlockSyncConfig() *BlockSyncConfig { return &BlockSyncConfig{ - Version: BlockSyncV0, + Enable: true, } } @@ -987,16 +925,7 @@ func TestBlockSyncConfig() *BlockSyncConfig { } // ValidateBasic performs basic validation. -func (cfg *BlockSyncConfig) ValidateBasic() error { - switch cfg.Version { - case BlockSyncV0: - return nil - case BlockSyncV2: - return errors.New("blocksync version v2 is no longer supported. 
Please use v0") - default: - return fmt.Errorf("unknown blocksync version %s", cfg.Version) - } -} +func (cfg *BlockSyncConfig) ValidateBasic() error { return nil } //----------------------------------------------------------------------------- // ConsensusConfig diff --git a/config/config_test.go b/config/config_test.go index 075cedc6a..181314492 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -66,7 +66,6 @@ func TestRPCConfigValidateBasic(t *testing.T) { assert.NoError(t, cfg.ValidateBasic()) fieldsToTest := []string{ - "GRPCMaxOpenConnections", "MaxOpenConnections", "MaxSubscriptionClients", "MaxSubscriptionsPerClient", @@ -82,26 +81,6 @@ func TestRPCConfigValidateBasic(t *testing.T) { } } -func TestP2PConfigValidateBasic(t *testing.T) { - cfg := TestP2PConfig() - assert.NoError(t, cfg.ValidateBasic()) - - fieldsToTest := []string{ - "MaxNumInboundPeers", - "MaxNumOutboundPeers", - "FlushThrottleTimeout", - "MaxPacketMsgPayloadSize", - "SendRate", - "RecvRate", - } - - for _, fieldName := range fieldsToTest { - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) - assert.Error(t, cfg.ValidateBasic()) - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) - } -} - func TestMempoolConfigValidateBasic(t *testing.T) { cfg := TestMempoolConfig() assert.NoError(t, cfg.ValidateBasic()) @@ -128,13 +107,6 @@ func TestStateSyncConfigValidateBasic(t *testing.T) { func TestBlockSyncConfigValidateBasic(t *testing.T) { cfg := TestBlockSyncConfig() assert.NoError(t, cfg.ValidateBasic()) - - // tamper with version - cfg.Version = "v2" - assert.Error(t, cfg.ValidateBasic()) - - cfg.Version = "invalid" - assert.Error(t, cfg.ValidateBasic()) } func TestConsensusConfig_ValidateBasic(t *testing.T) { diff --git a/config/db.go b/config/db.go index 3ae274a50..8f489a87a 100644 --- a/config/db.go +++ b/config/db.go @@ -1,9 +1,10 @@ package config import ( + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - db "github.com/tendermint/tm-db" ) // ServiceProvider takes a config and a logger and returns a ready to go Node. @@ -16,11 +17,11 @@ type DBContext struct { } // DBProvider takes a DBContext and returns an instantiated DB. -type DBProvider func(*DBContext) (db.DB, error) +type DBProvider func(*DBContext) (dbm.DB, error) // DefaultDBProvider returns a database using the DBBackend and DBDir // specified in the Config. 
-func DefaultDBProvider(ctx *DBContext) (db.DB, error) { - dbType := db.BackendType(ctx.Config.DBBackend) - return db.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) +func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { + dbType := dbm.BackendType(ctx.Config.DBBackend) + return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()) } diff --git a/config/toml.go b/config/toml.go index edb192109..3be385060 100644 --- a/config/toml.go +++ b/config/toml.go @@ -97,11 +97,6 @@ moniker = "{{ .BaseConfig.Moniker }}" # - No priv_validator_key.json, priv_validator_state.json mode = "{{ .BaseConfig.Mode }}" -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast-sync = {{ .BaseConfig.FastSyncMode }} - # Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb # * goleveldb (github.com/syndtr/goleveldb - most popular implementation) # - pure go @@ -164,15 +159,15 @@ state-file = "{{ js .PrivValidator.State }}" # when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client laddr = "{{ .PrivValidator.ListenAddr }}" -# Client certificate generated while creating needed files for secure connection. +# Path to the client certificate generated while creating needed files for secure connection. # If a remote validator address is provided but no certificate, the connection will be insecure client-certificate-file = "{{ js .PrivValidator.ClientCertificate }}" # Client key generated while creating certificates for secure connection -validator-client-key-file = "{{ js .PrivValidator.ClientKey }}" +client-key-file = "{{ js .PrivValidator.ClientKey }}" -# Path Root Certificate Authority used to sign both client and server certificates -certificate-authority = "{{ js .PrivValidator.RootCA }}" +# Path to the Root Certificate Authority used to sign both client and server certificates +root-ca-file = "{{ js .PrivValidator.RootCA }}" ####################################################################### @@ -198,26 +193,10 @@ cors-allowed-methods = [{{ range .RPC.CORSAllowedMethods }}{{ printf "%q, " . }} # A list of non simple headers the client is allowed to use with cross-domain requests cors-allowed-headers = [{{ range .RPC.CORSAllowedHeaders }}{{ printf "%q, " . }}{{end}}] -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-laddr = "{{ .RPC.GRPCListenAddress }}" - -# Maximum number of simultaneous connections. -# Does not include RPC (HTTP&WebSocket) connections. See max-open-connections -# If you want to accept a larger number than the default, make sure -# you increase your OS limits. -# 0 - unlimited. -# Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} -# 1024 - 40 - 10 - 50 = 924 = ~900 -# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. -grpc-max-open-connections = {{ .RPC.GRPCMaxOpenConnections }} - # Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool unsafe = {{ .RPC.Unsafe }} # Maximum number of simultaneous connections (including WebSocket). -# Does not include gRPC connections. See grpc-max-open-connections # If you want to accept a larger number than the default, make sure # you increase your OS limits. # 0 - unlimited. 
@@ -231,8 +210,8 @@ max-open-connections = {{ .RPC.MaxOpenConnections }} max-subscription-clients = {{ .RPC.MaxSubscriptionClients }} # Maximum number of unique queries a given client can /subscribe to -# If you're using GRPC (or Local RPC client) and /broadcast_tx_commit, set to -# the estimated # maximum number of broadcast_tx_commit calls per block. +# If you're using a Local RPC client and /broadcast_tx_commit, set this +# to the estimated maximum number of broadcast_tx_commit calls per block. max-subscriptions-per-client = {{ .RPC.MaxSubscriptionsPerClient }} # How long to wait for a tx to be committed during /broadcast_tx_commit. @@ -270,9 +249,6 @@ pprof-laddr = "{{ .RPC.PprofListenAddress }}" ####################################################### [p2p] -# Enable the new p2p layer. -disable-legacy = {{ .P2P.DisableLegacy }} - # Select the p2p internal queue queue-type = "{{ .P2P.QueueType }}" @@ -304,49 +280,12 @@ persistent-peers = "{{ .P2P.PersistentPeers }}" # UPNP port forwarding upnp = {{ .P2P.UPNP }} -# Path to address book -addr-book-file = "{{ js .P2P.AddrBook }}" - -# Set true for strict address routability rules -# Set false for private or local networks -addr-book-strict = {{ .P2P.AddrBookStrict }} - -# Maximum number of inbound peers -# -# TODO: Remove once p2p refactor is complete in favor of MaxConnections. -# ref: https://github.com/tendermint/tendermint/issues/5670 -max-num-inbound-peers = {{ .P2P.MaxNumInboundPeers }} - -# Maximum number of outbound peers to connect to, excluding persistent peers -# -# TODO: Remove once p2p refactor is complete in favor of MaxConnections. -# ref: https://github.com/tendermint/tendermint/issues/5670 -max-num-outbound-peers = {{ .P2P.MaxNumOutboundPeers }} - # Maximum number of connections (inbound and outbound). max-connections = {{ .P2P.MaxConnections }} # Rate limits the number of incoming connection attempts per IP address. max-incoming-connection-attempts = {{ .P2P.MaxIncomingConnectionAttempts }} -# List of node IDs, to which a connection will be (re)established ignoring any existing limits -unconditional-peer-ids = "{{ .P2P.UnconditionalPeerIDs }}" - -# Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) -persistent-peers-max-dial-period = "{{ .P2P.PersistentPeersMaxDialPeriod }}" - -# Time to wait before flushing messages out on the connection -flush-throttle-timeout = "{{ .P2P.FlushThrottleTimeout }}" - -# Maximum size of a message packet payload, in bytes -max-packet-msg-payload-size = {{ .P2P.MaxPacketMsgPayloadSize }} - -# Rate at which packets can be sent, in bytes/second -send-rate = {{ .P2P.SendRate }} - -# Rate at which packets can be received, in bytes/second -recv-rate = {{ .P2P.RecvRate }} - # Set true to enable the peer-exchange reactor pex = {{ .P2P.PexReactor }} @@ -426,22 +365,30 @@ ttl-num-blocks = {{ .Mempool.TTLNumBlocks }} # starting from the height of the snapshot. enable = {{ .StateSync.Enable }} -# RPC servers (comma-separated) for light client verification of the synced state machine and -# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding -# header hash obtained from a trusted source, and a period during which validators can be trusted. -# -# For Cosmos SDK-based chains, trust-period should usually be about 2/3 of the unbonding time (~2 -# weeks) during which they can be financially punished (slashed) for misbehavior. +# State sync uses light client verification to verify state. 
This can be done either through the
+# P2P layer or RPC layer. Set this to true to use the P2P layer. If false (default), the RPC layer
+# will be used.
+use-p2p = {{ .StateSync.UseP2P }}
+
+# If using RPC, at least two addresses need to be provided. They should be compatible with net.Dial,
+# for example: "host.example.com:2125"
 rpc-servers = "{{ StringsJoin .StateSync.RPCServers "," }}"
+
+# The hash and height of a trusted block. Must be within the trust-period.
 trust-height = {{ .StateSync.TrustHeight }}
 trust-hash = "{{ .StateSync.TrustHash }}"
+
+# The trust period should be set so that Tendermint can detect and gossip misbehavior before
+# it is considered expired. For chains based on the Cosmos SDK, one day less than the unbonding
+# period should suffice.
 trust-period = "{{ .StateSync.TrustPeriod }}"
 
 # Time to spend discovering snapshots before initiating a restore.
 discovery-time = "{{ .StateSync.DiscoveryTime }}"
 
-# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp).
-# Will create a new, randomly named directory within, and remove it when done.
+# Temporary directory for state sync snapshot chunks, defaults to os.TempDir().
+# The synchronizer will create a new, randomly named directory within this directory
+# and remove it when the sync is complete.
 temp-dir = "{{ .StateSync.TempDir }}"
 
 # The timeout duration before re-requesting a chunk, possibly from a different
@@ -454,12 +401,12 @@ fetchers = "{{ .StateSync.Fetchers }}"
 #######################################################
 ### Block Sync Configuration Connections ###
 #######################################################
-[fastsync]
+[blocksync]
 
-# Block Sync version to use:
-# 1) "v0" (default) - the legacy block sync implementation
-# 2) "v2" - DEPRECATED, please use v0
-version = "{{ .BlockSync.Version }}"
+# If this node is many blocks behind the tip of the chain, BlockSync
+# allows it to catch up quickly by downloading blocks in parallel
+# and verifying their commits.
+enable = {{ .BlockSync.Enable }}
 
 #######################################################
 ### Consensus Configuration Options ###
@@ -508,7 +455,7 @@ peer-query-maj23-sleep-duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"
 [tx-index]
 
 # The backend database list to back the indexer.
-# If list contains null, meaning no indexer service will be used.
+# If the list contains "null" or "", no indexer service will be used.
 #
 # The application will set which txs to index. In some cases a node operator will be able
 # to decide which txs to index based on configuration set in the application.
 #
 # Options:
 # 1) "null"
 # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
-# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
 # 3) "psql" - the indexer services backed by PostgreSQL.
+# When "kv" or "psql" is chosen, "tx.height" and "tx.hash" will always be indexed.
indexer = [{{ range $i, $e := .TxIndex.Indexer }}{{if $i}}, {{end}}{{ printf "%q" $e}}{{end}}] # The PostgreSQL connection configuration, the connection format: diff --git a/config/toml_test.go b/config/toml_test.go index 418cea8fa..ccf818d65 100644 --- a/config/toml_test.go +++ b/config/toml_test.go @@ -36,9 +36,7 @@ func TestEnsureRoot(t *testing.T) { data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath)) require.Nil(err) - if !checkConfig(string(data)) { - t.Fatalf("config file missing some information") - } + checkConfig(t, string(data)) ensureFiles(t, tmpDir, "data") } @@ -57,9 +55,7 @@ func TestEnsureTestRoot(t *testing.T) { data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) require.Nil(err) - if !checkConfig(string(data)) { - t.Fatalf("config file missing some information") - } + checkConfig(t, string(data)) // TODO: make sure the cfg returned and testconfig are the same! baseConfig := DefaultBaseConfig() @@ -67,16 +63,15 @@ func TestEnsureTestRoot(t *testing.T) { ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, pvConfig.Key, pvConfig.State) } -func checkConfig(configFile string) bool { - var valid bool - +func checkConfig(t *testing.T, configFile string) { + t.Helper() // list of words we expect in the config var elems = []string{ "moniker", "seeds", "proxy-app", - "fast_sync", - "create_empty_blocks", + "blocksync", + "create-empty-blocks", "peer", "timeout", "broadcast", @@ -89,10 +84,7 @@ func checkConfig(configFile string) bool { } for _, e := range elems { if !strings.Contains(configFile, e) { - valid = false - } else { - valid = true + t.Errorf("config file was expected to contain %s but did not", e) } } - return valid } diff --git a/crypto/armor/armor.go b/crypto/armor/armor.go deleted file mode 100644 index c15d070e6..000000000 --- a/crypto/armor/armor.go +++ /dev/null @@ -1,39 +0,0 @@ -package armor - -import ( - "bytes" - "fmt" - "io/ioutil" - - "golang.org/x/crypto/openpgp/armor" -) - -func EncodeArmor(blockType string, headers map[string]string, data []byte) string { - buf := new(bytes.Buffer) - w, err := armor.Encode(buf, blockType, headers) - if err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - _, err = w.Write(data) - if err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - err = w.Close() - if err != nil { - panic(fmt.Errorf("could not encode ascii armor: %s", err)) - } - return buf.String() -} - -func DecodeArmor(armorStr string) (blockType string, headers map[string]string, data []byte, err error) { - buf := bytes.NewBufferString(armorStr) - block, err := armor.Decode(buf) - if err != nil { - return "", nil, nil, err - } - data, err = ioutil.ReadAll(block.Body) - if err != nil { - return "", nil, nil, err - } - return block.Type, block.Header, data, nil -} diff --git a/crypto/armor/armor_test.go b/crypto/armor/armor_test.go deleted file mode 100644 index 8ecfaa0e1..000000000 --- a/crypto/armor/armor_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package armor - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestArmor(t *testing.T) { - blockType := "MINT TEST" - data := []byte("somedata") - armorStr := EncodeArmor(blockType, nil, data) - - // Decode armorStr and test for equivalence. 
- blockType2, _, data2, err := DecodeArmor(armorStr) - require.Nil(t, err, "%+v", err) - assert.Equal(t, blockType, blockType2) - assert.Equal(t, data, data2) -} diff --git a/crypto/encoding/codec.go b/crypto/encoding/codec.go index 64e4e7c6f..37249bcb3 100644 --- a/crypto/encoding/codec.go +++ b/crypto/encoding/codec.go @@ -8,34 +8,34 @@ import ( "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/sr25519" "github.com/tendermint/tendermint/libs/json" - pc "github.com/tendermint/tendermint/proto/tendermint/crypto" + cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" ) func init() { - json.RegisterType((*pc.PublicKey)(nil), "tendermint.crypto.PublicKey") - json.RegisterType((*pc.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") - json.RegisterType((*pc.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") + json.RegisterType((*cryptoproto.PublicKey)(nil), "tendermint.crypto.PublicKey") + json.RegisterType((*cryptoproto.PublicKey_Ed25519)(nil), "tendermint.crypto.PublicKey_Ed25519") + json.RegisterType((*cryptoproto.PublicKey_Secp256K1)(nil), "tendermint.crypto.PublicKey_Secp256K1") } // PubKeyToProto takes crypto.PubKey and transforms it to a protobuf Pubkey -func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) { - var kp pc.PublicKey +func PubKeyToProto(k crypto.PubKey) (cryptoproto.PublicKey, error) { + var kp cryptoproto.PublicKey switch k := k.(type) { case ed25519.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Ed25519{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Ed25519{ Ed25519: k, }, } case secp256k1.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Secp256K1{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Secp256K1{ Secp256K1: k, }, } case sr25519.PubKey: - kp = pc.PublicKey{ - Sum: &pc.PublicKey_Sr25519{ + kp = cryptoproto.PublicKey{ + Sum: &cryptoproto.PublicKey_Sr25519{ Sr25519: k, }, } @@ -46,9 +46,9 @@ func PubKeyToProto(k crypto.PubKey) (pc.PublicKey, error) { } // PubKeyFromProto takes a protobuf Pubkey and transforms it to a crypto.Pubkey -func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { +func PubKeyFromProto(k cryptoproto.PublicKey) (crypto.PubKey, error) { switch k := k.Sum.(type) { - case *pc.PublicKey_Ed25519: + case *cryptoproto.PublicKey_Ed25519: if len(k.Ed25519) != ed25519.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeyEd25519. Got %d, expected %d", len(k.Ed25519), ed25519.PubKeySize) @@ -56,7 +56,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { pk := make(ed25519.PubKey, ed25519.PubKeySize) copy(pk, k.Ed25519) return pk, nil - case *pc.PublicKey_Secp256K1: + case *cryptoproto.PublicKey_Secp256K1: if len(k.Secp256K1) != secp256k1.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeySecp256k1. Got %d, expected %d", len(k.Secp256K1), secp256k1.PubKeySize) @@ -64,7 +64,7 @@ func PubKeyFromProto(k pc.PublicKey) (crypto.PubKey, error) { pk := make(secp256k1.PubKey, secp256k1.PubKeySize) copy(pk, k.Secp256K1) return pk, nil - case *pc.PublicKey_Sr25519: + case *cryptoproto.PublicKey_Sr25519: if len(k.Sr25519) != sr25519.PubKeySize { return nil, fmt.Errorf("invalid size for PubKeySr25519. 
Got %d, expected %d", len(k.Sr25519), sr25519.PubKeySize) diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index dfe34ae10..c2c0c6017 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -13,7 +13,7 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" // necessary for Bitcoin address format - "golang.org/x/crypto/ripemd160" // nolint: staticcheck + "golang.org/x/crypto/ripemd160" // nolint ) //------------------------------------- diff --git a/crypto/secp256k1/secp256k1_nocgo.go b/crypto/secp256k1/secp256k1_nocgo.go index cba9bbe4c..6b52dc5d2 100644 --- a/crypto/secp256k1/secp256k1_nocgo.go +++ b/crypto/secp256k1/secp256k1_nocgo.go @@ -1,3 +1,4 @@ +//go:build !libsecp256k1 // +build !libsecp256k1 package secp256k1 diff --git a/crypto/secp256k1/secp256k1_test.go b/crypto/secp256k1/secp256k1_test.go index 83249ef6a..7a1109293 100644 --- a/crypto/secp256k1/secp256k1_test.go +++ b/crypto/secp256k1/secp256k1_test.go @@ -5,14 +5,13 @@ import ( "math/big" "testing" + underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" "github.com/btcsuite/btcutil/base58" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/secp256k1" - - underlyingSecp256k1 "github.com/btcsuite/btcd/btcec" ) type keyData struct { @@ -36,8 +35,7 @@ func TestPubKeySecp256k1Address(t *testing.T) { addrBbz, _, _ := base58.CheckDecode(d.addr) addrB := crypto.Address(addrBbz) - var priv secp256k1.PrivKey = secp256k1.PrivKey(privB) - + priv := secp256k1.PrivKey(privB) pubKey := priv.PubKey() pubT, _ := pubKey.(secp256k1.PubKey) pub := pubT diff --git a/crypto/xchacha20poly1305/xchachapoly_test.go b/crypto/xchacha20poly1305/xchachapoly_test.go index b17b1c376..75953d72d 100644 --- a/crypto/xchacha20poly1305/xchachapoly_test.go +++ b/crypto/xchacha20poly1305/xchachapoly_test.go @@ -2,8 +2,8 @@ package xchacha20poly1305 import ( "bytes" - cr "crypto/rand" - mr "math/rand" + crand "crypto/rand" + mrand "math/rand" "testing" ) @@ -19,23 +19,23 @@ func TestRandom(t *testing.T) { var nonce [24]byte var key [32]byte - al := mr.Intn(128) - pl := mr.Intn(16384) + al := mrand.Intn(128) + pl := mrand.Intn(16384) ad := make([]byte, al) plaintext := make([]byte, pl) - _, err := cr.Read(key[:]) + _, err := crand.Read(key[:]) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(nonce[:]) + _, err = crand.Read(nonce[:]) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(ad) + _, err = crand.Read(ad) if err != nil { t.Errorf("error on read: %w", err) } - _, err = cr.Read(plaintext) + _, err = crand.Read(plaintext) if err != nil { t.Errorf("error on read: %w", err) } @@ -59,7 +59,7 @@ func TestRandom(t *testing.T) { } if len(ad) > 0 { - alterAdIdx := mr.Intn(len(ad)) + alterAdIdx := mrand.Intn(len(ad)) ad[alterAdIdx] ^= 0x80 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { t.Errorf("random #%d: Open was successful after altering additional data", i) @@ -67,14 +67,14 @@ func TestRandom(t *testing.T) { ad[alterAdIdx] ^= 0x80 } - alterNonceIdx := mr.Intn(aead.NonceSize()) + alterNonceIdx := mrand.Intn(aead.NonceSize()) nonce[alterNonceIdx] ^= 0x80 if _, err := aead.Open(nil, nonce[:], ct, ad); err == nil { t.Errorf("random #%d: Open was successful after altering nonce", i) } nonce[alterNonceIdx] ^= 0x80 - alterCtIdx := mr.Intn(len(ct)) + alterCtIdx := mrand.Intn(len(ct)) ct[alterCtIdx] ^= 0x80 if _, err := aead.Open(nil, nonce[:], ct, ad); 
err == nil {
 			t.Errorf("random #%d: Open was successful after altering ciphertext", i)
diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js
index 59012fba1..4653e2c5b 100644
--- a/docs/.vuepress/config.js
+++ b/docs/.vuepress/config.js
@@ -48,10 +48,6 @@ module.exports = {
       {
         title: 'Resources',
         children: [
-          {
-            title: 'Developer Sessions',
-            path: '/DEV_SESSIONS.html'
-          },
           {
             title: 'RPC',
             path: 'https://docs.tendermint.com/master/rpc/',
@@ -78,7 +74,7 @@ module.exports = {
     },
     footer: {
       question: {
-        text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.'
+        text: 'Chat with Tendermint developers in Discord or reach out on the Tendermint Forum to learn more.'
       },
       logo: '/logo-bw.svg',
       textLink: {
diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md
index 04883e462..c1ab1580a 100644
--- a/docs/DOCS_README.md
+++ b/docs/DOCS_README.md
@@ -2,17 +2,27 @@
 The documentation for Tendermint Core is hosted at:
 
-- 
+- 
 
-built from the files in this (`/docs`) directory for
-[master](https://github.com/tendermint/tendermint/tree/master/docs) respectively.
+built from the files in this [`docs` directory for `master`](https://github.com/tendermint/tendermint/tree/master/docs)
+and other supported release branches.
 
 ## How It Works
 
-There is a CircleCI job listening for changes in the `/docs` directory, on both
-the `master` branch. Any updates to files in this directory
-on those branches will automatically trigger a website deployment. Under the hood,
-the private website repository has a `make build-docs` target consumed by a CircleCI job in that repo.
+There is a [GitHub Actions workflow](https://github.com/tendermint/docs/actions/workflows/deployment.yml)
+in the `tendermint/docs` repository that clones and builds the documentation
+site from the contents of this `docs` directory, for `master` and for each
+supported release branch. Under the hood, this workflow runs `make build-docs`
+from the [Makefile](../Makefile#L214).
+
+The list of supported versions is defined in [`config.js`](./.vuepress/config.js),
+which defines the UI menu on the documentation site, and also in
+[`docs/versions`](./versions), which determines which branches are built.
+
+The last entry in the `docs/versions` file determines which version is linked
+by default from the generated `index.html`. This should generally be the most
+recent release, rather than `master`, so that new users are not confused by
+documentation for unreleased features.
 
 ## README
diff --git a/docs/app-dev/indexing-transactions.md b/docs/app-dev/indexing-transactions.md
index 15108cb05..b8b06d01b 100644
--- a/docs/app-dev/indexing-transactions.md
+++ b/docs/app-dev/indexing-transactions.md
@@ -62,7 +62,7 @@ be turned off regardless of other values provided.
 #### KV
 
 The `kv` indexer type is an embedded key-value store supported by the main
-underling Tendermint database. Using the `kv` indexer type allows you to query
+underlying Tendermint database. Using the `kv` indexer type allows you to query
 for block and transaction events directly against Tendermint's RPC. However,
 the query syntax is limited and so this indexer type might be deprecated or
 removed entirely in the future.
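+
+For illustration, a minimal sketch of querying the `kv` indexer through the
+node's `/tx_search` RPC endpoint. The local node address and the height value
+are assumptions; the query uses the `tx.height` key, which `kv` always indexes:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"net/http"
+	"net/url"
+)
+
+func main() {
+	// Query the kv indexer through the node's RPC. The query string uses the
+	// indexer's (limited) query syntax; the address assumes a local node with
+	// the default RPC listen address.
+	query := url.QueryEscape(`"tx.height = 1000"`)
+	resp, err := http.Get("http://localhost:26657/tx_search?query=" + query)
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(string(body)) // JSON-RPC response containing matching transactions
+}
+```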
diff --git a/docs/app-dev/readme.md b/docs/app-dev/readme.md index 51e88fc34..46ce06ca0 100644 --- a/docs/app-dev/readme.md +++ b/docs/app-dev/readme.md @@ -1,7 +1,6 @@ --- order: false parent: + title: "Building Applications" order: 3 ---- - -# Apps +--- \ No newline at end of file diff --git a/docs/architecture/README.md b/docs/architecture/README.md index a4e326274..f6c12996f 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -97,3 +97,6 @@ Note the context/background should be written in the present tense. - [ADR-041: Proposer-Selection-via-ABCI](./adr-041-proposer-selection-via-abci.md) - [ADR-045: ABCI-Evidence](./adr-045-abci-evidence.md) - [ADR-057: RPC](./adr-057-RPC.md) +- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md) +- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md) +- [ADR-072: Restore Requests for Comments](./adr-072-request-for-comments.md) diff --git a/docs/architecture/adr-065-custom-event-indexing.md b/docs/architecture/adr-065-custom-event-indexing.md index e6a3fdead..83a96de48 100644 --- a/docs/architecture/adr-065-custom-event-indexing.md +++ b/docs/architecture/adr-065-custom-event-indexing.md @@ -24,6 +24,8 @@ - April 1, 2021: Initial Draft (@alexanderbez) - April 28, 2021: Specify search capabilities are only supported through the KV indexer (@marbar3778) - May 19, 2021: Update the SQL schema and the eventsink interface (@jayt106) +- Aug 30, 2021: Update the SQL schema and the psql implementation (@creachadair) +- Oct 5, 2021: Clarify goals and implementation changes (@creachadair) ## Status @@ -72,19 +74,38 @@ the database used. We will adopt a similar approach to that of the Cosmos SDK's `KVStore` state listening described in [ADR-038](https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-038-state-listening.md). -Namely, we will perform the following: +We will implement the following changes: - Introduce a new interface, `EventSink`, that all data sinks must implement. - Augment the existing `tx_index.indexer` configuration to now accept a series - of one or more indexer types, i.e sinks. + of one or more indexer types, i.e., sinks. - Combine the current `TxIndexer` and `BlockIndexer` into a single `KVEventSink` that implements the `EventSink` interface. -- Introduce an additional `EventSink` that is backed by [PostgreSQL](https://www.postgresql.org/). - - Implement the necessary schemas to support both block and transaction event - indexing. +- Introduce an additional `EventSink` implementation that is backed by + [PostgreSQL](https://www.postgresql.org/). + - Implement the necessary schemas to support both block and transaction event indexing. - Update `IndexerService` to use a series of `EventSinks`. -- Proxy queries to the relevant sink's native query layer. -- Update all relevant RPC methods. + +In addition: + +- The Postgres indexer implementation will _not_ implement the proprietary `kv` + query language. Users wishing to write queries against the Postgres indexer + will connect to the underlying DBMS directly and use SQL queries based on the + indexing schema. + + Future custom indexer implementations will not be required to support the + proprietary query language either. + +- For now, the existing `kv` indexer will be left in place with its current + query support, but will be marked as deprecated in a subsequent release, and + the documentation will be updated to encourage users who need to query the + event index to migrate to the Postgres indexer. 
+ +- In the future we may remove the `kv` indexer entirely, or replace it with a + different implementation; that decision is deferred as future work. + +- In the future, we may remove the index query endpoints from the RPC service + entirely; that decision is deferred as future work, but recommended. ## Detailed Design @@ -145,163 +166,190 @@ The postgres eventsink will not support `tx_search`, `block_search`, `GetTxByHas ```sql -- Table Definition ---------------------------------------------- -CREATE TYPE block_event_type AS ENUM ('begin_block', 'end_block', ''); +-- The blocks table records metadata about each block. +-- The block record does not include its events or transactions (see tx_results). +CREATE TABLE blocks ( + rowid BIGSERIAL PRIMARY KEY, -CREATE TABLE block_events ( - id SERIAL PRIMARY KEY, - key VARCHAR NOT NULL, - value VARCHAR NOT NULL, - height INTEGER NOT NULL, - type block_event_type, - created_at TIMESTAMPTZ NOT NULL, - chain_id VARCHAR NOT NULL + height BIGINT NOT NULL, + chain_id VARCHAR NOT NULL, + + -- When this block header was logged into the sink, in UTC. + created_at TIMESTAMPTZ NOT NULL, + + UNIQUE (height, chain_id) ); +-- Index blocks by height and chain, since we need to resolve block IDs when +-- indexing transaction records and transaction events. +CREATE INDEX idx_blocks_height_chain ON blocks(height, chain_id); + +-- The tx_results table records metadata about transaction results. Note that +-- the events from a transaction are stored separately. CREATE TABLE tx_results ( - id SERIAL PRIMARY KEY, - tx_result BYTEA NOT NULL, - created_at TIMESTAMPTZ NOT NULL + rowid BIGSERIAL PRIMARY KEY, + + -- The block to which this transaction belongs. + block_id BIGINT NOT NULL REFERENCES blocks(rowid), + -- The sequential index of the transaction within the block. + index INTEGER NOT NULL, + -- When this result record was logged into the sink, in UTC. + created_at TIMESTAMPTZ NOT NULL, + -- The hex-encoded hash of the transaction. + tx_hash VARCHAR NOT NULL, + -- The protobuf wire encoding of the TxResult message. + tx_result BYTEA NOT NULL, + + UNIQUE (block_id, index) ); -CREATE TABLE tx_events ( - id SERIAL PRIMARY KEY, - key VARCHAR NOT NULL, - value VARCHAR NOT NULL, - height INTEGER NOT NULL, - hash VARCHAR NOT NULL, - tx_result_id SERIAL, - created_at TIMESTAMPTZ NOT NULL, - chain_id VARCHAR NOT NULL, - FOREIGN KEY (tx_result_id) - REFERENCES tx_results(id) - ON DELETE CASCADE +-- The events table records events. All events (both block and transaction) are +-- associated with a block ID; transaction events also have a transaction ID. +CREATE TABLE events ( + rowid BIGSERIAL PRIMARY KEY, + + -- The block and transaction this event belongs to. + -- If tx_id is NULL, this is a block event. + block_id BIGINT NOT NULL REFERENCES blocks(rowid), + tx_id BIGINT NULL REFERENCES tx_results(rowid), + + -- The application-defined type label for the event. + type VARCHAR NOT NULL ); --- Indices ------------------------------------------------------- +-- The attributes table records event attributes. +CREATE TABLE attributes ( + event_id BIGINT NOT NULL REFERENCES events(rowid), + key VARCHAR NOT NULL, -- bare key + composite_key VARCHAR NOT NULL, -- composed type.key + value VARCHAR NULL, -CREATE INDEX idx_block_events_key_value ON block_events(key, value); -CREATE INDEX idx_tx_events_key_value ON tx_events(key, value); -CREATE INDEX idx_tx_events_hash ON tx_events(hash); + UNIQUE (event_id, key) +); + +-- A joined view of events and their attributes. 
Events that do not have any +-- attributes are represented as a single row with empty key and value fields. +CREATE VIEW event_attributes AS + SELECT block_id, tx_id, type, key, composite_key, value + FROM events LEFT JOIN attributes ON (events.rowid = attributes.event_id); + +-- A joined view of all block events (those having tx_id NULL). +CREATE VIEW block_events AS + SELECT blocks.rowid as block_id, height, chain_id, type, key, composite_key, value + FROM blocks JOIN event_attributes ON (blocks.rowid = event_attributes.block_id) + WHERE event_attributes.tx_id IS NULL; + +-- A joined view of all transaction events. +CREATE VIEW tx_events AS + SELECT height, index, chain_id, type, key, composite_key, value, tx_results.created_at + FROM blocks JOIN tx_results ON (blocks.rowid = tx_results.block_id) + JOIN event_attributes ON (tx_results.rowid = event_attributes.tx_id) + WHERE event_attributes.tx_id IS NOT NULL; ``` The `PSQLEventSink` will implement the `EventSink` interface as follows (some details omitted for brevity): - ```go -func NewPSQLEventSink(connStr string, chainID string) (*PSQLEventSink, error) { - db, err := sql.Open("postgres", connStr) - if err != nil { - return nil, err - } +func NewEventSink(connStr, chainID string) (*EventSink, error) { + db, err := sql.Open(driverName, connStr) + // ... - // ... + return &EventSink{ + store: db, + chainID: chainID, + }, nil } -func (es *PSQLEventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error { - sqlStmt := sq.Insert("block_events").Columns("key", "value", "height", "type", "created_at", "chain_id") +func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error { + ts := time.Now().UTC() - // index the reserved block height index - ts := time.Now() - sqlStmt = sqlStmt.Values(types.BlockHeightKey, h.Header.Height, h.Header.Height, "", ts, es.chainID) + return runInTransaction(es.store, func(tx *sql.Tx) error { + // Add the block to the blocks table and report back its row ID for use + // in indexing the events for the block. + blockID, err := queryWithID(tx, ` +INSERT INTO blocks (height, chain_id, created_at) + VALUES ($1, $2, $3) + ON CONFLICT DO NOTHING + RETURNING rowid; +`, h.Header.Height, es.chainID, ts) + // ... - for _, event := range h.ResultBeginBlock.Events { - // only index events with a non-empty type - if len(event.Type) == 0 { - continue - } - - for _, attr := range event.Attributes { - if len(attr.Key) == 0 { - continue - } - - // index iff the event specified index:true and it's not a reserved event - compositeKey := fmt.Sprintf("%s.%s", event.Type, string(attr.Key)) - if compositeKey == types.BlockHeightKey { - return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) - } - - if attr.GetIndex() { - sqlStmt = sqlStmt.Values(compositeKey, string(attr.Value), h.Header.Height, BlockEventTypeBeginBlock, ts, es.chainID) - } - } - } - - // index end_block events... - // execute sqlStmt db query... + // Insert the special block meta-event for height. + if err := insertEvents(tx, blockID, 0, []abci.Event{ + makeIndexedEvent(types.BlockHeightKey, fmt.Sprint(h.Header.Height)), + }); err != nil { + return fmt.Errorf("block meta-events: %w", err) + } + // Insert all the block events. 
Order is important here: begin-block events are inserted before end-block events.
+		if err := insertEvents(tx, blockID, 0, h.ResultBeginBlock.Events); err != nil {
+			return fmt.Errorf("begin-block events: %w", err)
+		}
+		if err := insertEvents(tx, blockID, 0, h.ResultEndBlock.Events); err != nil {
+			return fmt.Errorf("end-block events: %w", err)
+		}
+		return nil
+	})
 }
 
-func (es *PSQLEventSink) IndexTxEvents(txr []*abci.TxResult) error {
-	sqlStmtEvents := sq.Insert("tx_events").Columns("key", "value", "height", "hash", "tx_result_id", "created_at", "chain_id")
-	sqlStmtTxResult := sq.Insert("tx_results").Columns("tx_result", "created_at")
+func (es *EventSink) IndexTxEvents(txrs []*abci.TxResult) error {
+	ts := time.Now().UTC()
 
-	ts := time.Now()
-	for _, tx := range txr {
-		// store the tx result
-		txBz, err := proto.Marshal(tx)
-		if err != nil {
-			return err
-		}
+	for _, txr := range txrs {
+		// Encode the result message in protobuf wire format for indexing.
+		resultData, err := proto.Marshal(txr)
+		// ...
 
-		sqlStmtTxResult = sqlStmtTxResult.Values(txBz, ts)
+		// Index the hash of the underlying transaction as a hex string.
+		txHash := fmt.Sprintf("%X", types.Tx(txr.Tx).Hash())
 
-		// execute sqlStmtTxResult db query...
-		var txID uint32
-		err = sqlStmtTxResult.QueryRow().Scan(&txID)
-		if err != nil {
+		if err := runInTransaction(es.store, func(tx *sql.Tx) error {
+			// Find the block associated with this transaction.
+			blockID, err := queryWithID(tx, `
+SELECT rowid FROM blocks WHERE height = $1 AND chain_id = $2;
+`, txr.Height, es.chainID)
+			// ...
+
+			// Insert a record for this tx_result and capture its ID for indexing events.
+			txID, err := queryWithID(tx, `
+INSERT INTO tx_results (block_id, index, created_at, tx_hash, tx_result)
+  VALUES ($1, $2, $3, $4, $5)
+  ON CONFLICT DO NOTHING
+  RETURNING rowid;
+`, blockID, txr.Index, ts, txHash, resultData)
+			// ...
+
+			// Insert the special transaction meta-events for hash and height.
+			if err := insertEvents(tx, blockID, txID, []abci.Event{
+				makeIndexedEvent(types.TxHashKey, txHash),
+				makeIndexedEvent(types.TxHeightKey, fmt.Sprint(txr.Height)),
+			}); err != nil {
+				return fmt.Errorf("indexing transaction meta-events: %w", err)
+			}
+			// Index any events packaged with the transaction.
+			if err := insertEvents(tx, blockID, txID, txr.Result.Events); err != nil {
+				return fmt.Errorf("indexing transaction events: %w", err)
+			}
+			return nil
+
+		}); err != nil {
 			return err
 		}
-
-		// index the reserved height and hash indices
-		hash := types.Tx(tx.Tx).Hash()
-		sqlStmtEvents = sqlStmtEvents.Values(types.TxHashKey, hash, tx.Height, hash, txID, ts, es.chainID)
-		sqlStmtEvents = sqlStmtEvents.Values(types.TxHeightKey, tx.Height, tx.Height, hash, txID, ts, es.chainID)
-
-		for _, event := range result.Result.Events {
-			// only index events with a non-empty type
-			if len(event.Type) == 0 {
-				continue
-			}
-
-			for _, attr := range event.Attributes {
-				if len(attr.Key) == 0 {
-					continue
-				}
-
-				// index if `index: true` is set
-				compositeTag := fmt.Sprintf("%s.%s", event.Type, string(attr.Key))
-
-				// ensure event does not conflict with a reserved prefix key
-				if compositeTag == types.TxHashKey || compositeTag == types.TxHeightKey {
-					return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeTag)
-				}
-
-				if attr.GetIndex() {
-					sqlStmtEvents = sqlStmtEvents.Values(compositeKey, string(attr.Value), tx.Height, hash, txID, ts, es.chainID)
-				}
-			}
-		}
-	}
-
-	// execute sqlStmtEvents db query...
+	}
+	return nil
 }
 
-func (es *PSQLEventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) {
-	return nil, errors.New("block search is not supported via the postgres event sink")
-}
+// SearchBlockEvents is not implemented by this sink, and reports an error for all queries.
+func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error)
 
-func (es *PSQLEventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) {
-	return nil, errors.New("tx search is not supported via the postgres event sink")
-}
+// SearchTxEvents is not implemented by this sink, and reports an error for all queries.
+func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error)
 
-func (es *PSQLEventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) {
-	return nil, errors.New("getTxByHash is not supported via the postgres event sink")
-}
+// GetTxByHash is not implemented by this sink, and reports an error for all queries.
+func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error)
 
-func (es *PSQLEventSink) HasBlock(h int64) (bool, error) {
-	return false, errors.New("hasBlock is not supported via the postgres event sink")
-}
+// HasBlock is not implemented by this sink, and reports an error for all queries.
+func (es *EventSink) HasBlock(h int64) (bool, error)
 ```
 
 ### Configuration
diff --git a/docs/architecture/adr-069-flexible-node-intitalization.md b/docs/architecture/adr-069-flexible-node-intitalization.md
new file mode 100644
index 000000000..ec66725be
--- /dev/null
+++ b/docs/architecture/adr-069-flexible-node-intitalization.md
@@ -0,0 +1,273 @@
+# ADR 069: Flexible Node Initialization
+
+## Changelog
+
+- 2021-06-09: Initial Draft (@tychoish)
+
+- 2021-07-21: Major Revision (@tychoish)
+
+## Status
+
+Proposed.
+
+## Context
+
+In an effort to support [Go-API-Stability](./adr-060-go-api-stability.md),
+during the 0.35 development cycle, we have attempted to reduce the API
+surface area by moving most of the interface of the `node` package into
+unexported functions, as well as moving the reactors to an `internal`
+package. Having this coincide with the 0.35 release made a lot of sense
+because these interfaces were _already_ changing as a result of the `p2p`
+[refactor](./adr-061-p2p-refactor-scope.md), so it made sense to think a bit
+more about how tendermint exposes this API.
+
+While the interfaces of the P2P layer and most of the node package are already
+internalized, this precludes some operational patterns that are important to
+users who use tendermint as a library. Specifically, introspecting the
+tendermint node service and replacing components is not supported in the latest
+version of the code, and some of these use cases would require maintaining a
+vendored copy of the code. Adding these features requires rather extensive
+(internal/implementation) changes to the `node` and `rpc` packages, and this
+ADR describes a model for changing the way that tendermint nodes initialize, in
+service of providing this kind of functionality.
+
+We consider node initialization because the current implementation
+provides strong connections between all components, as well as between
+the components of the node and the RPC layer, and being able to think
+about the interactions of these components will help enable these
+features and help define the requirements of the node package.
+
+## Alternative Approaches
+
+These alternatives are presented to frame the design space and to
+contextualize the decision in terms of product requirements. These
+ideas are not inherently bad, and may even be possible or desirable
+in the (distant) future, and merely provide additional context for how
+we, in the moment, came to our decision(s).
+
+### Do Nothing
+
+The current implementation is functional and sufficient for the vast
+majority of use cases (e.g., all users of the Cosmos-SDK as well as
+anyone who runs tendermint and the ABCI application in separate
+processes). In the current implementation, and even previous versions,
+modifying node initialization or injecting custom components required
+copying most of the `node` package, which forced such users
+to maintain a vendored copy of tendermint.
+
+While this is (likely) not tenable in the long term, as users do want
+more modularity, and the current service implementation is brittle and
+difficult to maintain, in the short term it may be possible to delay
+implementation somewhat. Eventually, however, we will need to make the
+`node` package easier to maintain and reason about.
+
+### Generic Service Pluggability
+
+One possible system design would export interfaces (in the Golang
+sense) for all components of the system, to permit runtime dependency
+injection of all components in the system, so that users can compose
+tendermint nodes of arbitrary user-supplied components.
+
+Although this level of customization would provide benefits, it would be a huge
+undertaking (particularly with regards to API design work) that we do not have
+scope for at the moment. Eventually providing support for some kinds of
+pluggability may be useful, so the current solution does not explicitly
+foreclose the possibility of this alternative.
+
+### Abstract Dependency Based Startup and Shutdown
+
+The main proposal in this document makes tendermint node initialization simpler
+and more abstract, but the system lacks a number of
+features which daemon/service initialization could provide, such as a
+system allowing the authors of services to control initialization and shutdown order
+of components using dependency relationships.
+
+Such a system could work by allowing services to declare
+initialization order dependencies on other reactors (by ID, perhaps)
+so that the node could decide the initialization based on the
+dependencies declared by services rather than requiring the node to
+encode this logic directly.
+
+This level of configuration is probably more complicated than is needed. Given
+that the authors of components in the current implementation of tendermint
+already *do* need to know about other components, a dependency-based system
+would probably be overly abstract at this stage.
+
+## Decisions
+
+- To the greatest extent possible, factor the code base so that
+  packages are responsible for their own initialization, and minimize
+  the amount of code in the `node` package itself.
+
+- As a design goal, reduce direct coupling and dependencies between
+  components in the implementation of `node`.
+
+- Begin iterating on a more-flexible internal framework for
+  initializing tendermint nodes to make the initialization process
+  less hard-coded by the implementation of the node objects.
+
+  - Reactors should not need to expose their interfaces *within* the
+    implementation of the node type.
+
+  - This refactoring should be entirely opaque to users.
+
+  - These node initialization changes should not require a
+    reevaluation of the `service.Service` interface or a generic
+    initialization orchestration framework.
+
+- Do not proactively provide a system for injecting
+  components/services within a tendermint node, though make it
+  possible to retrofit this kind of pluggability in the future if
+  needed.
+
+- Prioritize implementation of the p2p-based statesync reactor to obviate
+  the need for users to inject a custom state-sync provider.
+
+## Detailed Design
+
+The [current
+nodeImpl](https://github.com/tendermint/tendermint/blob/master/node/node.go#L47)
+includes direct references to the implementations of each of the
+reactors, which should be replaced by references to `service.Service`
+objects. This will require moving construction of the [rpc
+service](https://github.com/tendermint/tendermint/blob/master/node/node.go#L771)
+into the constructor of
+[makeNode](https://github.com/tendermint/tendermint/blob/master/node/node.go#L126). One
+possible implementation of this would be to eliminate the current
+`ConfigureRPC` method on the node package and instead [configure it
+here](https://github.com/tendermint/tendermint/pull/6798/files#diff-375d57e386f20eaa5f09f02bb9d28bfc48ac3dca18d0325f59492208219e5618R441).
+
+To avoid adding complexity to the `node` package, we will add a
+composite service implementation to the `service` package
+that implements `service.Service` and is composed of a sequence of
+underlying `service.Service` objects and handles their
+startup/shutdown in the specified sequential order (a sketch of this
+composite service appears in the appendix below).
+
+Consensus, blocksync (*née* fast sync), and statesync all depend on
+each other, and have significant initialization dependencies that are
+presently encoded in the `node` package. As part of this change, a
+new package/component (likely named `blocks` located at
+`internal/blocks`) will encapsulate the initialization of these block
+management areas of the code.
+
+### Injectable Component Option
+
+This section briefly describes a possible implementation for
+user-supplied services running within a node. This should not be
+implemented unless user-supplied components are a hard requirement for
+a user.
+
+In order to allow components to be replaced, a new public function
+will be added to the public interface of `node` with a signature that
+resembles the following:
+
+```go
+func NewWithServices(conf *config.Config,
+	logger log.Logger,
+	cf proxy.ClientCreator,
+	gen *types.GenesisDoc,
+	srvs []service.Service,
+) (service.Service, error) {
+```
+
+The `service.Service` objects will be initialized in the order supplied, after
+all pre-configured/default services have started (and shut down in reverse
+order). The given services may implement additional interfaces, allowing them
+to replace specific default services. `NewWithServices` will validate input
+service lists with the following rules:
+
+- None of the services may already be running.
+- The caller may not supply more than one replacement reactor for a given
+  default service type.
+
+If callers violate any of these rules, `NewWithServices` will return
+an error. To retract support for this kind of operation in the future,
+the function can be modified to *always* return an error.
+
+## Consequences
+
+### Positive
+
+- The node package will become easier to maintain.
+
+- It will become easier to add additional services within tendermint
+  nodes.
+
+- It will become possible to replace default components in the node
+  package without vendoring the tendermint repo and modifying internal
+  code.
+
+- The current end-to-end (e2e) test suite will be able to prevent any
+  regressions, and the new functionality can be thoroughly unit tested.
+
+- The scope of this project is very narrow, which minimizes risk.
+
+### Negative
+
+- This increases our reliance on the `service.Service` interface, which
+  is probably not an interface that we want to fully commit to.
+
+- This proposal implements a fairly minimal set of functionality and
+  leaves open the possibility for many additional features which are
+  not included in the scope of this proposal.
+
+### Neutral
+
+N/A
+
+## Open Questions
+
+- To what extent does this new initialization framework need to accommodate
+  the legacy p2p stack? Would it be possible to delay a great deal of this
+  work to the 0.36 cycle to avoid this complexity?
+
+  - Answer: _depends on timing_, and the requirement to ship pluggable reactors in 0.35.
+
+- Where should additional public types be exported for the 0.35
+  release?
+
+  Related to the general project of API stabilization we want to deprecate
+  the `types` package, and move its contents into a new `pkg` hierarchy;
+  however, the design of the `pkg` interface is currently underspecified.
+  If `types` is going to remain for the 0.35 release, then we should consider
+  the impact of using multiple organizing modalities for this code within a
+  single release.
+
+## Future Work
+
+- Improve or simplify the `service.Service` interface. There are some
+  pretty clear limitations with this interface as written (there's no
+  way to time out slow startup or shutdown, the cycle between the
+  `service.BaseService` and `service.Service` implementations is
+  troubling, and the default panic in `OnReset` is also concerning.)
+
+- As part of the refactor of `service.Service` have all services/nodes
+  respect the lifetime of a `context.Context` object, and avoid the
+  current practice of creating `context.Context` objects in p2p and
+  reactor code. This would be required for in-process multi-tenancy.
+
+- Support explicit dependencies between components and allow for
+  parallel startup, so that different reactors can start up at the same
+  time, where possible.
+
+## References
+
+- [this
+  branch](https://github.com/tendermint/tendermint/tree/tychoish/scratch-node-minimize)
+  contains experimental work in the implementation of the node package
+  to unwind some of the hard dependencies between components.
+
+- [the component
+  graph](https://peter.bourgon.org/go-for-industrial-programming/#the-component-graph)
+  as a framing for internal service construction.
+
+## Appendix
+
+### Dependencies
+
+There's a relationship between the blockchain and consensus reactors,
+described by the following dependency graph, that makes replacing some of
+these components more difficult relative to other reactors or
+components.
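+
+To make the sequencing described in the Detailed Design concrete, here is a
+minimal sketch of the composite service. The two-method `Service` interface
+below is a deliberately reduced assumption; the real interface in
+`libs/service` has more methods:
+
+```go
+package service
+
+// Service is reduced to the two lifecycle methods this sketch needs; the
+// real service.Service interface is larger.
+type Service interface {
+	Start() error
+	Stop() error
+}
+
+// Composite starts a fixed sequence of services in order and stops them in
+// reverse order, unwinding any already-started services if a Start fails.
+type Composite struct {
+	services []Service
+}
+
+func NewComposite(svcs ...Service) *Composite {
+	return &Composite{services: svcs}
+}
+
+func (c *Composite) Start() error {
+	for i, s := range c.services {
+		if err := s.Start(); err != nil {
+			// Unwind the services that already started, in reverse order.
+			for j := i - 1; j >= 0; j-- {
+				_ = c.services[j].Stop() // best-effort cleanup
+			}
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *Composite) Stop() error {
+	var firstErr error
+	for i := len(c.services) - 1; i >= 0; i-- {
+		if err := c.services[i].Stop(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	return firstErr
+}
+```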
+ +![consensus blockchain dependency graph](./img/consensus_blockchain.png) diff --git a/docs/architecture/adr-071-proposer-based-timestamps.md b/docs/architecture/adr-071-proposer-based-timestamps.md new file mode 100644 index 000000000..c23488005 --- /dev/null +++ b/docs/architecture/adr-071-proposer-based-timestamps.md @@ -0,0 +1,445 @@ +# ADR 71: Proposer-Based Timestamps + +* [Changelog](#changelog) +* [Status](#status) +* [Context](#context) +* [Alternative Approaches](#alternative-approaches) + * [Remove timestamps altogether](#remove-timestamps-altogether) +* [Decision](#decision) +* [Detailed Design](#detailed-design) + * [Overview](#overview) + * [Proposal Timestamp and Block Timestamp](#proposal-timestamp-and-block-timestamp) + * [Saving the timestamp across heights](#saving-the-timestamp-across-heights) + * [Changes to `CommitSig`](#changes-to-commitsig) + * [Changes to `Commit`](#changes-to-commit) + * [Changes to `Vote` messages](#changes-to-vote-messages) + * [New consensus parameters](#new-consensus-parameters) + * [Changes to `Header`](#changes-to-header) + * [Changes to the block proposal step](#changes-to-the-block-proposal-step) + * [Proposer selects proposal timestamp](#proposer-selects-proposal-timestamp) + * [Proposer selects block timestamp](#proposer-selects-block-timestamp) + * [Proposer waits](#proposer-waits) + * [Changes to the propose step timeout](#changes-to-the-propose-step-timeout) + * [Changes to validation rules](#changes-to-validation-rules) + * [Proposal timestamp validation](#proposal-timestamp-validation) + * [Block timestamp validation](#block-timestamp-validation) + * [Changes to the prevote step](#changes-to-the-prevote-step) + * [Changes to the precommit step](#changes-to-the-precommit-step) + * [Changes to locking a block](#changes-to-locking-a-block) + * [Remove voteTime Completely](#remove-votetime-completely) +* [Future Improvements](#future-improvements) +* [Consequences](#consequences) + * [Positive](#positive) + * [Neutral](#neutral) + * [Negative](#negative) +* [References](#references) + +## Changelog + + - July 15 2021: Created by @williambanfield + - Aug 4 2021: Draft completed by @williambanfield + - Aug 5 2021: Draft updated to include data structure changes by @williambanfield + - Aug 20 2021: Language edits completed by @williambanfield + +## Status + + **Accepted** + +## Context + +Tendermint currently provides a monotonically increasing source of time known as [BFTTime](https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md). +This mechanism for producing a source of time is reasonably simple. +Each correct validator adds a timestamp to each `Precommit` message it sends. +The timestamp it sends is either the validator's current known Unix time or one millisecond greater than the previous block time, depending on which value is greater. +When a block is produced, the proposer chooses the block timestamp as the weighted median of the times in all of the `Precommit` messages the proposer received. +The weighting is proportional to the amount of voting power, or stake, a validator has on the network. +This mechanism for producing timestamps is both deterministic and byzantine fault tolerant. + +This current mechanism for producing timestamps has a few drawbacks. +Validators do not have to agree at all on how close the selected block timestamp is to their own currently known Unix time. +Additionally, any amount of voting power `>1/3` may directly control the block timestamp. 
+As a result, it is quite possible that the timestamp is not particularly meaningful. + +These drawbacks present issues in the Tendermint protocol. +Timestamps are used by light clients to verify blocks. +Light clients rely on correspondence between their own currently known Unix time and the block timestamp to verify blocks they see; +However, their currently known Unix time may be greatly divergent from the block timestamp as a result of the limitations of `BFTTime`. + +The proposer-based timestamps specification suggests an alternative approach for producing block timestamps that remedies these issues. +Proposer-based timestamps alter the current mechanism for producing block timestamps in two main ways: + +1. The block proposer is amended to offer up its currently known Unix time as the timestamp for the next block. +1. Correct validators only approve the proposed block timestamp if it is close enough to their own currently known Unix time. + +The result of these changes is a more meaningful timestamp that cannot be controlled by `<= 2/3` of the validator voting power. +This document outlines the necessary code changes in Tendermint to implement the corresponding [proposer-based timestamps specification](https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp). + +## Alternative Approaches + +### Remove timestamps altogether + +Computer clocks are bound to skew for a variety of reasons. +Using timestamps in our protocol means either accepting the timestamps as not reliable or impacting the protocol’s liveness guarantees. +This design requires impacting the protocol’s liveness in order to make the timestamps more reliable. +An alternate approach is to remove timestamps altogether from the block protocol. +`BFTTime` is deterministic but may be arbitrarily inaccurate. +However, having a reliable source of time is quite useful for applications and protocols built on top of a blockchain. + +We therefore decided not to remove the timestamp. +Applications often wish for some transactions to occur on a certain day, on a regular period, or after some time following a different event. +All of these require some meaningful representation of agreed upon time. +The following protocols and application features require a reliable source of time: +* Tendermint Light Clients [rely on correspondence between their known time](https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md#definitions-1) and the block time for block verification. +* Tendermint Evidence validity is determined [either in terms of heights or in terms of time](https://github.com/tendermint/spec/blob/8029cf7a0fcc89a5004e173ec065aa48ad5ba3c8/spec/consensus/evidence.md#verification). +* Unbonding of staked assets in the Cosmos Hub [occurs after a period of 21 days](https://github.com/cosmos/governance/blob/ce75de4019b0129f6efcbb0e752cd2cc9e6136d3/params-change/Staking.md#unbondingtime). +* IBC packets can use either a [timestamp or a height to timeout packet delivery](https://docs.cosmos.network/v0.43/ibc/overview.html#acknowledgements). + +Finally, inflation distribution in the Cosmos Hub uses an approximation of time to calculate an annual percentage rate. +This approximation of time is calculated using [block heights with an estimated number of blocks produced in a year](https://github.com/cosmos/governance/blob/master/params-change/Mint.md#blocksperyear). 
+Proposer-based timestamps will allow this inflation calculation to use a more meaningful and accurate source of time. + + +## Decision + +Implement proposer-based timestamps and remove `BFTTime`. + +## Detailed Design + +### Overview + +Implementing proposer-based timestamps will require a few changes to Tendermint’s code. +These changes will be to the following components: +* The `internal/consensus/` package. +* The `state/` package. +* The `Vote`, `CommitSig`, `Commit` and `Header` types. +* The consensus parameters. + +### Proposal Timestamp and Block Timestamp + +This design discusses two timestamps: (1) The timestamp in the block and (2) the timestamp in the proposal message. +The existence and use of both of these timestamps can get a bit confusing, so some background is given here to clarify their uses. + +The [proposal message currently has a timestamp](https://github.com/tendermint/tendermint/blob/e5312942e30331e7c42b75426da2c6c9c00ae476/types/proposal.go#L31). +This timestamp is the current Unix time known to the proposer when sending the `Proposal` message. +This timestamp is not currently used as part of consensus. +The changes in this ADR will begin using the proposal message timestamp as part of consensus. +We will refer to this as the **proposal timestamp** throughout this design. + +The block has a timestamp field [in the header](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/block.go#L338). +This timestamp is set currently as part of Tendermint’s `BFTtime` algorithm. +It is set when a block is proposed and it is checked by the validators when they are deciding to prevote the block. +This field will continue to be used but the logic for creating and validating this timestamp will change. +We will refer to this as the **block timestamp** throughout this design. + +At a high level, the proposal timestamp from height `H` is used as the block timestamp at height `H+1`. +The following image shows this relationship. +The rest of this document describes the code changes that will make this possible. + +![](./img/pbts-message.png) + +### Saving the timestamp across heights + +Currently, `BFTtime` uses `LastCommit` to construct the block timestamp. +The `LastCommit` is created at height `H-1` and is saved in the state store to be included in the block at height `H`. +`BFTtime` takes the weighted median of the timestamps in `LastCommit.CommitSig` to build the timestamp for height `H`. + +For proposer-based timestamps, the `LastCommit.CommitSig` timestamps will no longer be used to build the timestamps for height `H`. +Instead, the proposal timestamp from height `H-1` will become the block timestamp for height `H`. +To enable this, we will add a `Timestamp` field to the `Commit` struct. +This field will be populated at each height with the proposal timestamp decided on at the previous height. +This timestamp will also be saved with the rest of the commit in the state store [when the commit is finalized](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L1611) so that it can be recovered if Tendermint crashes. +Changes to the `CommitSig` and `Commit` struct are detailed below. + +### Changes to `CommitSig` + +The [CommitSig](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L604) struct currently contains a timestamp. +This timestamp is the current Unix time known to the validator when it issued a `Precommit` for the block. 
+This timestamp will no longer be used and will be removed in this change.
+
+`CommitSig` will be updated as follows:
+
+```diff
+type CommitSig struct {
+ BlockIDFlag BlockIDFlag `json:"block_id_flag"`
+ ValidatorAddress Address `json:"validator_address"`
+-- Timestamp time.Time `json:"timestamp"`
+ Signature []byte `json:"signature"`
+}
+```
+
+### Changes to `Commit`
+
+The [Commit](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L746) struct does not currently contain a timestamp.
+The timestamps in the `Commit.CommitSig` entries are currently used to build the block timestamp.
+With these timestamps removed, the commit time will instead be stored in the `Commit` struct.
+
+`Commit` will be updated as follows:
+
+```diff
+type Commit struct {
+ Height int64 `json:"height"`
+ Round int32 `json:"round"`
+++ Timestamp time.Time `json:"timestamp"`
+ BlockID BlockID `json:"block_id"`
+ Signatures []CommitSig `json:"signatures"`
+}
+```
+
+### Changes to `Vote` messages
+
+`Precommit` and `Prevote` messages use a common [Vote struct](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/vote.go#L50).
+This struct currently contains a timestamp.
+This timestamp is set using the [voteTime](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2241) function and therefore vote times correspond to the current Unix time known to the validator.
+For precommits, this timestamp is used to construct the [CommitSig that is included in the block in the LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L754) field.
+For prevotes, this field is unused.
+Proposer-based timestamps will use the [RoundState.Proposal](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/internal/consensus/types/round_state.go#L76) timestamp to construct the `signedBytes` for each `CommitSig`.
+This timestamp is therefore no longer useful and will be dropped.
+
+`Vote` will be updated as follows:
+
+```diff
+type Vote struct {
+ Type tmproto.SignedMsgType `json:"type"`
+ Height int64 `json:"height"`
+ Round int32 `json:"round"`
+ BlockID BlockID `json:"block_id"` // zero if vote is nil.
+-- Timestamp time.Time `json:"timestamp"`
+ ValidatorAddress Address `json:"validator_address"`
+ ValidatorIndex int32 `json:"validator_index"`
+ Signature []byte `json:"signature"`
+}
+```
+
+### New consensus parameters
+
+The proposer-based timestamp specification includes multiple new parameters that must be the same among all validators.
+These parameters are `PRECISION`, `MSGDELAY`, and `ACCURACY`.
+
+The `PRECISION` and `MSGDELAY` parameters are used to determine if the proposed timestamp is acceptable.
+A validator will only Prevote a proposal if the proposal timestamp is considered `timely`.
+A proposal timestamp is considered `timely` if it is within `PRECISION` and `MSGDELAY` of the Unix time known to the validator.
+More specifically, a proposal timestamp is `timely` if `validatorLocalTime - PRECISION < proposalTime < validatorLocalTime + PRECISION + MSGDELAY`.
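+
+To make the inequality concrete, here is a minimal sketch of such a check; the function and parameter names are illustrative and not taken from the Tendermint codebase:
+
+```go
+import "time"
+
+// isTimely is a sketch of the timely predicate described above.
+func isTimely(proposalTime, localTime time.Time, precision, msgDelay time.Duration) bool {
+    // validatorLocalTime - PRECISION < proposalTime
+    lowerBound := localTime.Add(-precision)
+    // proposalTime < validatorLocalTime + PRECISION + MSGDELAY
+    upperBound := localTime.Add(precision + msgDelay)
+    return proposalTime.After(lowerBound) && proposalTime.Before(upperBound)
+}
+```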
+
+Because the `PRECISION` and `MSGDELAY` parameters must be the same across all validators, they will be added to the [consensus parameters](https://github.com/tendermint/tendermint/blob/master/proto/tendermint/types/params.proto#L13) as [durations](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#google.protobuf.Duration).
+
+The proposer-based timestamp specification also includes a [new ACCURACY parameter](https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/pbts-sysmodel_001_draft.md#pbts-clocksync-external0).
+Intuitively, `ACCURACY` represents the difference between the ‘real’ time and the currently known time of correct validators.
+The currently known Unix time of any validator is always somewhat different from real time.
+`ACCURACY` is the largest such difference between each validator's time and real time taken as an absolute value.
+This is not something a computer can determine on its own and must be specified as an estimate by the community running a Tendermint-based chain.
+It is used in the new algorithm to [calculate a timeout for the propose step](https://github.com/tendermint/spec/blob/master/spec/consensus/proposer-based-timestamp/pbts-algorithm_001_draft.md#pbts-alg-startround0).
+`ACCURACY` is assumed to be the same across all validators and therefore should be included as a consensus parameter.
+
+The consensus parameters will be updated to include this new `Timestamp` field as follows:
+
+```diff
+type ConsensusParams struct {
+ Block BlockParams `json:"block"`
+ Evidence EvidenceParams `json:"evidence"`
+ Validator ValidatorParams `json:"validator"`
+ Version VersionParams `json:"version"`
+++ Timestamp TimestampParams `json:"timestamp"`
+}
+```
+
+```go
+type TimestampParams struct {
+ Accuracy time.Duration `json:"accuracy"`
+ Precision time.Duration `json:"precision"`
+ MsgDelay time.Duration `json:"msg_delay"`
+}
+```
+
+### Changes to `Header`
+
+The [Header](https://github.com/tendermint/tendermint/blob/a419f4df76fe4aed668a6c74696deabb9fe73211/types/block.go#L338) struct currently contains a timestamp.
+This timestamp is set as the `BFTTime` derived from the block's `LastCommit.CommitSig` timestamps.
+This timestamp will no longer be derived from the `LastCommit.CommitSig` timestamps and will instead be included directly into the block's `LastCommit`.
+This timestamp will therefore be identical in both the `Header` and the `LastCommit`.
+To clarify that the timestamp in the header corresponds to the `LastCommit`'s time, we will rename this timestamp field to `last_timestamp`.
+
+`Header` will be updated as follows:
+
+```diff
+type Header struct {
+ // basic block info
+ Version version.Consensus `json:"version"`
+ ChainID string `json:"chain_id"`
+ Height int64 `json:"height"`
+-- Time time.Time `json:"time"`
+++ LastTimestamp time.Time `json:"last_timestamp"`
+
+ // prev block info
+ LastBlockID BlockID `json:"last_block_id"`
+
+ // hashes of block data
+ LastCommitHash tmbytes.HexBytes `json:"last_commit_hash"`
+ DataHash tmbytes.HexBytes `json:"data_hash"`
+
+ // hashes from the app output from the prev block
+ ValidatorsHash tmbytes.HexBytes `json:"validators_hash"`
+ NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"`
+ ConsensusHash tmbytes.HexBytes `json:"consensus_hash"`
+ AppHash tmbytes.HexBytes `json:"app_hash"`
+
+ // root hash of all results from the txs from the previous block
+ LastResultsHash tmbytes.HexBytes `json:"last_results_hash"`
+
+ // consensus info
+ EvidenceHash tmbytes.HexBytes `json:"evidence_hash"`
+ ProposerAddress Address `json:"proposer_address"`
+}
+```
+
+### Changes to the block proposal step
+
+#### Proposer selects proposal timestamp
+
+The proposal logic already [sets the Unix time known to the validator](https://github.com/tendermint/tendermint/blob/2abfe20114ee3bb3adfee817589033529a804e4d/types/proposal.go#L44) into the `Proposal` message.
+This satisfies the proposer-based timestamp specification and does not need to change.
+
+#### Proposer selects block timestamp
+
+The proposal timestamp that was decided in height `H-1` will be stored in the `RoundState.LastCommit` field.
+The proposer will select this timestamp to use as the block timestamp at height `H`.
+
+#### Proposer waits
+
+Block timestamps must be monotonically increasing.
+In `BFTTime`, if a validator’s clock was behind, the [validator added 1 millisecond to the previous block’s time and used that in its vote messages](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/state.go#L2246).
+A goal of adding proposer-based timestamps is to enforce some degree of clock synchronization, so a mechanism that completely ignores the validator’s known Unix time no longer works.
+
+Validator clocks will not be perfectly in sync.
+Therefore, the proposer’s current known Unix time may be less than the `LastCommit.Timestamp`.
+If the proposer’s current known Unix time is less than the `LastCommit.Timestamp`, the proposer will sleep until its known Unix time exceeds `LastCommit.Timestamp`.
+
+This change will require amending the [defaultDecideProposal](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1180) method.
+This method should now block until the proposer’s time is greater than `LastCommit.Timestamp`.
+
+#### Changes to the propose step timeout
+
+Currently, a validator waiting for a proposal will proceed past the propose step if the configured propose timeout is reached and no proposal is seen.
+Proposer-based timestamps require changing this timeout logic.
+
+The proposer will now wait until its current known Unix time exceeds the `LastCommit.Timestamp` to propose a block.
+The validators must now take this and some other factors into account when deciding when to time out the propose step.
+Specifically, the propose step timeout must also take into account potential inaccuracy in the validator’s clock and in the clock of the proposer.
+Additionally, there may be a delay communicating the proposal message from the proposer to the other validators.
+
+Therefore, validators waiting for a proposal must wait until after the `LastCommit.Timestamp` before timing out.
+To account for possible inaccuracy in its own clock, inaccuracy in the proposer’s clock, and message delay, validators waiting for a proposal will wait until `LastCommit.Timestamp + 2*ACCURACY + MSGDELAY`.
+The spec defines this as `waitingTime`.
+
+The [propose step’s timeout is set in enterPropose](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L1108) in `state.go`.
+`enterPropose` will be changed to calculate waiting time using the new consensus parameters.
+The timeout in `enterPropose` will then be set as the maximum of `waitingTime` and the [configured proposal step timeout](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/config/config.go#L1013).
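+
+As a sketch, the two waits described above might look roughly like the following; the names and signatures are illustrative and are not the actual functions in `state.go` (assumes `import "time"`):
+
+```go
+// proposerWaitTime returns how long the proposer must sleep so that its local
+// clock exceeds the timestamp decided at the previous height.
+func proposerWaitTime(localTime, lastCommitTime time.Time) time.Duration {
+    if lastCommitTime.After(localTime) {
+        return lastCommitTime.Sub(localTime) + time.Millisecond
+    }
+    return 0
+}
+
+// proposeStepTimeout returns the propose step timeout: the maximum of the
+// configured timeout and waitingTime, where waitingTime covers
+// LastCommit.Timestamp + 2*ACCURACY + MSGDELAY measured from the validator's
+// current local time.
+func proposeStepTimeout(localTime, lastCommitTime time.Time, accuracy, msgDelay, configured time.Duration) time.Duration {
+    waitingTime := lastCommitTime.Add(2*accuracy + msgDelay).Sub(localTime)
+    if waitingTime > configured {
+        return waitingTime
+    }
+    return configured
+}
+```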
+
+### Changes to validation rules
+
+The rules for validating that a proposal is valid will need slight modification to implement proposer-based timestamps.
+Specifically, we will change the validation logic to ensure that the proposal timestamp is `timely` and we will modify the way the block timestamp is validated as well.
+
+#### Proposal timestamp validation
+
+Adding proposal timestamp validation is a reasonably straightforward change.
+The current Unix time known to the proposer is already included in the [Proposal message](https://github.com/tendermint/tendermint/blob/dc7c212c41a360bfe6eb38a6dd8c709bbc39aae7/types/proposal.go#L31).
+Once the proposal is received, the complete message is stored in the `RoundState.Proposal` field.
+The precommit and prevote validation logic does not currently use this timestamp.
+This validation logic will be updated to check that the proposal timestamp is within `PRECISION` of the current Unix time known to the validator.
+If the timestamp is not within `PRECISION` of the current Unix time known to the validator, the proposal will not be considered valid.
+The validator will also check that the proposal time is greater than the block timestamp from the previous height.
+
+If no valid proposal is received by the proposal timeout, the validator will prevote nil.
+This is identical to the current logic.
+
+#### Block timestamp validation
+
+The [validBlock function](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L14) currently [validates the proposed block timestamp in three ways](https://github.com/tendermint/tendermint/blob/c3ae6f5b58e07b29c62bfdc5715b6bf8ae5ee951/state/validation.go#L118).
+First, the validation logic checks that this timestamp is greater than the previous block’s timestamp.
+Additionally, it validates that the block timestamp is correctly calculated as the weighted median of the timestamps in the [block’s LastCommit](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/block.go#L48).
+Finally, the logic also authenticates the timestamps in the `LastCommit`.
+The cryptographic signature in each `CommitSig` is created by signing a hash of fields in the block with the validator’s private key.
+One of the items in this `signedBytes` hash is derived from the timestamp in the `CommitSig`.
+To authenticate the `CommitSig` timestamp, the validator builds a hash of fields that includes the timestamp and checks this hash against the provided signature.
+This takes place in the [VerifyCommit function](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/types/validation.go#L25).
+
+The logic to validate that the block timestamp is greater than the previous block’s timestamp also works for proposer-based timestamps and will not change.
+
+`BFTTime` validation is no longer applicable and will be removed.
+Validators will no longer check that the block timestamp is a weighted median of `LastCommit` timestamps.
+This will mean removing the call to [MedianTime in the validateBlock function](https://github.com/tendermint/tendermint/blob/4db71da68e82d5cb732b235eeb2fd69d62114b45/state/validation.go#L117).
+The `MedianTime` function can be completely removed.
+The `LastCommit` timestamps may also be removed.
+
+The `signedBytes` validation logic in `VerifyCommit` will be slightly altered.
+The `CommitSig`s in the block’s `LastCommit` will no longer each contain a timestamp.
+The validation logic will instead include the `LastCommit.Timestamp` in the hash of fields for generating the `signedBytes`.
+The cryptographic signatures included in the `CommitSig`s will then be checked against this `signedBytes` hash to authenticate the timestamp.
+Specifically, the `VerifyCommit` function will be updated to use this new timestamp.
+
+### Changes to the prevote step
+
+Currently, a validator will prevote a proposal in one of three cases:
+
+* Case 1: Validator has no locked block and receives a valid proposal.
+* Case 2: Validator has a locked block and receives a valid proposal matching its locked block.
+* Case 3: Validator has a locked block, sees a valid proposal not matching its locked block but sees +2/3 prevotes for the new proposal’s block.
+
+The only change we will make to the prevote step is to what a validator considers a valid proposal, as detailed above.
+
+### Changes to the precommit step
+
+The precommit step will not require much modification.
+Its proposal validation rules will change in the same ways that validation will change in the prevote step.
+
+### Changes to locking a block
+
+When a validator receives a valid proposed block and +2/3 prevotes for that block, it stores the block as its ‘locked block’ in the [RoundState.ValidBlock](https://github.com/tendermint/tendermint/blob/e8013281281985e3ada7819f42502b09623d24a0/internal/consensus/types/round_state.go#L85) field.
+In each subsequent round it will prevote that block.
+A validator will only change which block it has locked if it sees +2/3 prevotes for a different block.
+
+This mechanism will remain largely unchanged.
+The only difference is the addition of proposal timestamp validation.
+A validator will prevote nil in a round if the proposal message it received is not `timely`.
+Prevoting nil in this case will not cause a validator to ‘unlock’ its locked block.
+This difference is an incidental result of the changes to prevote validation.
+It is included in this design for completeness and to clarify that no additional changes will be made to block locking.
+
+### Remove voteTime Completely
+
+[voteTime](https://github.com/tendermint/tendermint/blob/822893615564cb20b002dd5cf3b42b8d364cb7d9/internal/consensus/state.go#L2229) is a mechanism for calculating the next `BFTTime` given both the validator's current known Unix time and the previous block timestamp.
+If the previous block timestamp is greater than the validator's current known Unix time, then voteTime returns a value one millisecond greater than the previous block timestamp.
+This logic is used in multiple places and is no longer needed for proposer-based timestamps.
+It should therefore be removed completely.
+
+## Future Improvements
+
+* Implement BLS signature aggregation.
+By removing fields from the `Precommit` messages, we are able to aggregate signatures.
+
+## Consequences
+
+### Positive
+
+* `<2/3` of validators can no longer influence block timestamps.
+* Block timestamp will have stronger correspondence to real time.
+* Improves the reliability of light client block verification.
+* Enables BLS signature aggregation.
+* Enables evidence handling to use time instead of height for evidence validity.
+
+### Neutral
+
+* Alters Tendermint’s liveness properties.
+Liveness now requires that all correct validators have synchronized clocks within a bound.
+Liveness will now also require that validators’ clocks move forward, which was not required under `BFTTime`.
+
+### Negative
+
+* May increase the length of the propose step if there is a large skew between the previous proposer and the current proposer’s local Unix time.
+This skew will be bound by the `PRECISION` value, so it is unlikely to be too large.
+
+* Current chains with block timestamps far in the future will either need to pause consensus until the erroneous block timestamp has passed or maintain synchronized but very inaccurate clocks.
+
+## References
+
+* [PBTS Spec](https://github.com/tendermint/spec/tree/master/spec/consensus/proposer-based-timestamp)
+* [BFTTime spec](https://github.com/tendermint/spec/blob/master/spec/consensus/bft-time.md)
diff --git a/docs/architecture/adr-072-request-for-comments.md b/docs/architecture/adr-072-request-for-comments.md
new file mode 100644
index 000000000..7eb22ebc9
--- /dev/null
+++ b/docs/architecture/adr-072-request-for-comments.md
@@ -0,0 +1,105 @@
+# ADR 72: Restore Requests for Comments
+
+## Changelog
+
+- 20-Aug-2021: Initial draft (@creachadair)
+
+## Status
+
+Proposed
+
+## Context
+
+In the past, we kept a collection of Request for Comments (RFC) documents in `docs/rfc`.
+Prior to the creation of the ADR process, these documents were used to document
+design and implementation decisions about Tendermint Core. The RFC directory
+was removed in favor of ADRs, in commit 3761aa69 (PR
+[\#6345](https://github.com/tendermint/tendermint/pull/6345)).
+
+For issues where an explicit design decision or implementation change is
+required, an ADR is generally preferable to an open-ended RFC: An ADR is
+relatively narrowly-focused, identifies a specific design or implementation
+question, and documents the consensus answer to that question.
+
+Some discussions are more open-ended, however, or don't require a specific
+decision to be made (yet). Such conversations are still valuable to document,
+and several members of the Tendermint team have been doing so by writing gists
+or Google docs to share them around. That works well enough in the moment, but
+gists do not support any kind of collaborative editing, and both gists and docs
+are hard to discover after the fact. Google docs have much better collaborative
+editing, but are worse for discoverability, especially when contributors span
+different Google accounts.
+
+Discoverability is important, because these kinds of open-ended discussions are
+useful to people who come later -- either as new team members or as outside
+contributors seeking to use and understand the thoughts behind our designs and
+the architectural decisions that arose from those discussions.
+ +With these in mind, I propose that: + +- We re-create a new, initially empty `docs/rfc` directory in the repository, + and use it to capture these kinds of open-ended discussions in supplement to + ADRs. + +- Unlike in the previous RFC scheme, documents in this new directory will + _not_ be used directly for decision-making. This is the key difference + between an RFC and an ADR. + + Instead, an RFC will exist to document background, articulate general + principles, and serve as a historical record of discussion and motivation. + + In this system, an RFC may _only_ result in a decision indirectly, via ADR + documents created in response to the RFC. + + **In short:** If a decision is required, write an ADR; otherwise if a + sufficiently broad discussion is needed, write an RFC. + +Just so that there is a consistent format, I also propose that: + +- RFC files are named `rfc-XXX-title.{md,rst,txt}` and are written in plain + text, Markdown, or ReStructured Text. + +- Like an ADR, an RFC should include a high-level change log at the top of the + document, and sections for: + + * Abstract: A brief, high-level synopsis of the topic. + * Background: Any background necessary to understand the topic. + * Discussion: Detailed discussion of the issue being considered. + +- Unlike an ADR, an RFC does _not_ include sections for Decisions, Detailed + Design, or evaluation of proposed solutions. If an RFC leads to a proposal + for an actual architectural change, that must be recorded in an ADR in the + usual way, and may refer back to the RFC in its References section. + +## Alternative Approaches + +Leaving aside implementation details, the main alternative to this proposal is +to leave things as they are now, with ADRs as the only log of record and other +discussions being held informally in whatever medium is convenient at the time. + +## Decision + +(pending) + +## Detailed Design + +- Create a new `docs/rfc` directory in the `tendermint` repository. Note that + this proposal intentionally does _not_ pull back the previous contents of + that path from Git history, as those documents were appropriately merged into + the ADR process. + +- Create a `README.md` for RFCs that explains the rules and their relationship + to ADRs. + +- Create an `rfc-template.md` file for RFC files. + +## Consequences + +### Positive + +- We will have a more discoverable place to record open-ended discussions that + do not immediately result in a design change. + +### Negative + +- Potentially some people could be confused about the RFC/ADR distinction. diff --git a/docs/architecture/img/consensus_blockchain.png b/docs/architecture/img/consensus_blockchain.png new file mode 100644 index 000000000..dd0f4daa8 Binary files /dev/null and b/docs/architecture/img/consensus_blockchain.png differ diff --git a/docs/architecture/img/pbts-message.png b/docs/architecture/img/pbts-message.png new file mode 100644 index 000000000..400f35690 Binary files /dev/null and b/docs/architecture/img/pbts-message.png differ diff --git a/docs/networks/README.md b/docs/networks/README.md deleted file mode 100644 index 8528f44ed..000000000 --- a/docs/networks/README.md +++ /dev/null @@ -1,16 +0,0 @@ ---- -order: 1 -parent: - title: Networks - order: 6 ---- - -# Overview - -Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on your -local machine. - -Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint -testnets to the cloud. 
- -See the `tendermint testnet --help` command for more help initializing testnets. diff --git a/docs/nodes/README.md b/docs/nodes/README.md index 9be6febf0..fd9056e0d 100644 --- a/docs/nodes/README.md +++ b/docs/nodes/README.md @@ -1,7 +1,7 @@ --- order: 1 parent: - title: Nodes + title: Node Operators order: 4 --- diff --git a/docs/nodes/configuration.md b/docs/nodes/configuration.md index b5259f93f..0c11df6f7 100644 --- a/docs/nodes/configuration.md +++ b/docs/nodes/configuration.md @@ -16,8 +16,7 @@ the parameters set with their default values. It will look something like the file below, however, double check by inspecting the `config.toml` created with your version of `tendermint` installed: -```toml -# This is a TOML config file. +```toml# This is a TOML config file. # For more information, see https://github.com/toml-lang/toml # NOTE: Any path below can be absolute (e.g. "/var/myawesomeapp/data") or @@ -34,18 +33,14 @@ like the file below, however, double check by inspecting the proxy-app = "tcp://127.0.0.1:26658" # A custom human readable name for this node -moniker = "anonymous" +moniker = "ape" -# If this node is many blocks behind the tip of the chain, BlockSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast-sync = true # Mode of Node: full | validator | seed (default: "validator") # * validator node (default) # - all reactors # - with priv_validator_key.json, priv_validator_state.json -# * full node +# * full node # - all reactors # - No priv_validator_key.json, priv_validator_state.json # * seed node @@ -53,6 +48,11 @@ fast-sync = true # - No priv_validator_key.json, priv_validator_state.json mode = "validator" +# If this node is many blocks behind the tip of the chain, FastSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +fast-sync = true + # Database backend: goleveldb | cleveldb | boltdb | rocksdb | badgerdb # * goleveldb (github.com/syndtr/goleveldb - most popular implementation) # - pure go @@ -88,16 +88,6 @@ log-format = "plain" # Path to the JSON file containing the initial validator set and other meta data genesis-file = "config/genesis.json" -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv-validator-key-file = "config/priv_validator_key.json" - -# Path to the JSON file containing the last sign state of a validator -priv-validator-state-file = "data/priv_validator_state.json" - -# TCP or UNIX socket address for Tendermint to listen on for -# connections from an external PrivValidator process -priv-validator-laddr = "" - # Path to the JSON file containing the private key to use for node authentication in the p2p protocol node-key-file = "config/node_key.json" @@ -109,6 +99,33 @@ abci = "socket" filter-peers = false +####################################################### +### Priv Validator Configuration ### +####################################################### +[priv-validator] + +# Path to the JSON file containing the private key to use as a validator in the consensus protocol +key-file = "config/priv_validator_key.json" + +# Path to the JSON file containing the last sign state of a validator +state-file = "data/priv_validator_state.json" + +# TCP or UNIX socket address for Tendermint to listen on for +# connections from an external PrivValidator process +# when the listenAddr is prefixed with grpc instead of tcp it will use the gRPC Client +laddr = "" + +# Path to the client certificate generated 
while creating needed files for secure connection. +# If a remote validator address is provided but no certificate, the connection will be insecure +client-certificate-file = "" + +# Client key generated while creating certificates for secure connection +validator-client-key-file = "" + +# Path to the Root Certificate Authority used to sign both client and server certificates +certificate-authority = "" + + ####################################################################### ### Advanced Configuration Options ### ####################################################################### @@ -134,6 +151,7 @@ cors-allowed-headers = ["Origin", "Accept", "Content-Type", "X-Requested-With", # TCP or UNIX socket address for the gRPC server to listen on # NOTE: This server only supports /broadcast_tx_commit +# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. grpc-laddr = "" # Maximum number of simultaneous connections. @@ -143,9 +161,10 @@ grpc-laddr = "" # 0 - unlimited. # Should be < {ulimit -Sn} - {MaxNumInboundPeers} - {MaxNumOutboundPeers} - {N of wal, db and other open files} # 1024 - 40 - 10 - 50 = 924 = ~900 +# Deprecated gRPC in the RPC layer of Tendermint will be deprecated in 0.36. grpc-max-open-connections = 900 -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool +# Activate unsafe RPC commands like /dial-seeds and /unsafe-flush-mempool unsafe = false # Maximum number of simultaneous connections (including WebSocket). @@ -202,18 +221,31 @@ pprof-laddr = "" ####################################################### [p2p] +# Select the p2p internal queue +queue-type = "priority" + # Address to listen for incoming connections laddr = "tcp://0.0.0.0:26656" # Address to advertise to peers for them to dial # If empty, will use the same port as the laddr, # and will introspect on the listener or use UPnP -# to figure out the address. +# to figure out the address. ip and port are required +# example: 159.89.10.97:26656 external-address = "" # Comma separated list of seed nodes to connect to +# We only use these if we can’t connect to peers in the addrbook +# NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead. +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 seeds = "" +# Comma separated list of peers to be added to the peer store +# on startup. Either BootstrapPeers or PersistentPeers are +# needed for peer discovery +bootstrap-peers = "" + # Comma separated list of nodes to keep persistent connections to persistent-peers = "" @@ -221,6 +253,8 @@ persistent-peers = "" upnp = false # Path to address book +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 addr-book-file = "config/addrbook.json" # Set true for strict address routability rules @@ -228,9 +262,15 @@ addr-book-file = "config/addrbook.json" addr-book-strict = true # Maximum number of inbound peers +# +# TODO: Remove once p2p refactor is complete in favor of MaxConnections. +# ref: https://github.com/tendermint/tendermint/issues/5670 max-num-inbound-peers = 40 # Maximum number of outbound peers to connect to, excluding persistent peers +# +# TODO: Remove once p2p refactor is complete in favor of MaxConnections. +# ref: https://github.com/tendermint/tendermint/issues/5670 max-num-outbound-peers = 10 # Maximum number of connections (inbound and outbound). 
@@ -240,27 +280,40 @@ max-connections = 64 max-incoming-connection-attempts = 100 # List of node IDs, to which a connection will be (re)established ignoring any existing limits +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 unconditional-peer-ids = "" # Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 persistent-peers-max-dial-period = "0s" # Time to wait before flushing messages out on the connection +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 flush-throttle-timeout = "100ms" # Maximum size of a message packet payload, in bytes -max-packet-msg-payload-size = 1024 +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 +max-packet-msg-payload-size = 1400 # Rate at which packets can be sent, in bytes/second +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 send-rate = 5120000 # Rate at which packets can be received, in bytes/second +# TODO: Remove once p2p refactor is complete +# ref: https:#github.com/tendermint/tendermint/issues/5670 recv-rate = 5120000 # Set true to enable the peer-exchange reactor pex = true # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) +# Warning: IPs will be exposed at /net_info, for more information https://github.com/tendermint/tendermint/issues/3055 private-peer-ids = "" # Toggle to disable guard against peers connecting from the same ip. @@ -353,14 +406,26 @@ discovery-time = "15s" # Will create a new, randomly named directory within, and remove it when done. temp-dir = "" +# The timeout duration before re-requesting a chunk, possibly from a different +# peer (default: 15 seconds). +chunk-request-timeout = "15s" + +# The number of concurrent chunk and block fetchers to run (default: 4). +fetchers = "4" + ####################################################### -### BlockSync Configuration Connections ### +### Block Sync Configuration Connections ### ####################################################### -[fastsync] +[blocksync] + +# If this node is many blocks behind the tip of the chain, BlockSync +# allows them to catchup quickly by downloading blocks in parallel +# and verifying their commits +enable = true # Block Sync version to use: -# 1) "v0" (default) - the legacy block sync implementation -# 2) "v2" - complete redesign of v0, optimized for testability & readability +# 1) "v0" (default) - the standard block sync implementation +# 2) "v2" - DEPRECATED, please use v0 version = "v0" ####################################################### @@ -409,7 +474,8 @@ peer-query-maj23-sleep-duration = "2s" ####################################################### [tx-index] -# What indexer to use for transactions +# The backend database list to back the indexer. +# If list contains "null" or "", meaning no indexer service will be used. # # The application will set which txs to index. In some cases a node operator will be able # to decide which txs to index based on configuration set in the application. @@ -417,8 +483,13 @@ peer-query-maj23-sleep-duration = "2s" # Options: # 1) "null" # 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). 
-# - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
-indexer = "kv"
+# 3) "psql" - the indexer services backed by PostgreSQL.
+# When "kv" or "psql" is chosen "tx.height" and "tx.hash" will always be indexed.
+indexer = ["kv"]
+
+# The PostgreSQL connection configuration, the connection format:
+# postgresql://:@:/?
+psql-conn = ""
 
 #######################################################
 ### Instrumentation Configuration Options ###
 #######################################################
@@ -519,10 +590,61 @@ This section will cover settings within the p2p section of the `config.toml`.
 
 - `external-address` = is the address that will be advertised for other nodes to use. We recommend setting this field with your public IP and p2p port.
-  > We recommend setting an external address. When used in a private network, Tendermint Core currently doesn't advertise the node's public address. There is active and ongoing work to improve the P2P system, but this is a helpful workaround for now.
-- `seeds` = is a list of comma separated seed nodes that you will connect upon a start and ask for peers. A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the networks
 - `persistent-peers` = is a list of comma separated peers that you will always want to be connected to. If you're already connected to the maximum number of peers, persistent peers will not be added.
-- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection).
-- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connects to at one time (where you dial their address and initiate the connection).
-- `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node.
 - `pex` = turns the peer exchange reactor on or off. Validator node will want the `pex` turned off so it would not begin gossiping to unknown peers on the network. PeX can also be turned off for statically configured networks with fixed network connectivity. For full nodes on open, dynamic networks, it should be turned on.
 - `private-peer-ids` = is a comma-separated list of node ids that will _not_ be exposed to other peers (i.e., you will not tell other peers about the ids in this list). This can be filled with a validator's node id.
+
+Recently the Tendermint team conducted a refactor of the p2p layer. This led to multiple config parameters being deprecated and/or replaced.
+
+We will cover the new and deprecated parameters below.
+
+### New Parameters
+
+There are three new parameters, which are enabled if `use-legacy` is set to false.
+
+- `queue-type` = sets the type of queue to use in the p2p layer. There are three options available: `fifo`, `priority`, and `wdrr`. The default is `priority`.
+- `bootstrap-peers` = is a list of comma separated peers which will be used to bootstrap the address book.
+- `max-connections` = is the maximum number of allowed inbound and outbound connections.
+
+### Deprecated Parameters
+
+> Note: For Tendermint 0.35, there are two p2p implementations. The old implementation is used by default, with the deprecated fields. The new implementation uses different config parameters, explained above.
+
+- `max-num-inbound-peers` = is the maximum number of peers you will accept inbound connections from at one time (where they dial your address and initiate the connection). *This was replaced by `max-connections`.*
+- `max-num-outbound-peers` = is the maximum number of peers you will initiate outbound connections to at one time (where you dial their address and initiate the connection). *This was replaced by `max-connections`.*
+- `unconditional-peer-ids` = is similar to `persistent-peers` except that these peers will be connected to even if you are already connected to the maximum number of peers. This can be a validator node ID on your sentry node. *Deprecated.*
+- `seeds` = is a list of comma separated seed nodes that you will connect to upon start and ask for peers. A seed node is a node that does not participate in consensus but only helps propagate peers to nodes in the network. *Deprecated, replaced by bootstrap peers.*
+
+## Indexing Settings
+
+Operators can configure indexing via the `[tx-index]` section. The `indexer`
+field takes a series of supported indexers. If `null` is included, indexing will
+be turned off regardless of other values provided.
+
+### Supported Indexers
+
+#### KV
+
+The `kv` indexer type is an embedded key-value store supported by the main
+underlying Tendermint database. Using the `kv` indexer type allows you to query
+for block and transaction events directly against Tendermint's RPC. However, the
+query syntax is limited and so this indexer type might be deprecated or removed
+entirely in the future.
+
+#### PostgreSQL
+
+The `psql` indexer type allows an operator to enable block and transaction event
+indexing by proxying it to an external PostgreSQL instance, allowing the events
+to be stored in relational models. Since the events are stored in an RDBMS, operators
+can leverage SQL to perform a series of rich and complex queries that are not
+supported by the `kv` indexer type. Since operators can leverage SQL directly,
+searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
+such query will fail.
+
+Note that the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
+must explicitly create the relations prior to starting Tendermint and enabling
+the `psql` indexer type.
+
+Example:
+
+```shell
+$ psql ... -f state/indexer/sink/psql/schema.sql
+```
diff --git a/docs/rfc/README.md b/docs/rfc/README.md
new file mode 100644
index 000000000..c3adfa08a
--- /dev/null
+++ b/docs/rfc/README.md
@@ -0,0 +1,47 @@
+---
+order: 1
+parent:
+  order: false
+---
+
+# Requests for Comments
+
+A Request for Comments (RFC) is a record of discussion on an open-ended topic
+related to the design and implementation of Tendermint Core, for which no
+immediate decision is required.
+
+The purpose of an RFC is to serve as a historical record of a high-level
+discussion that might otherwise only be recorded in an ad hoc way (for example,
+via gists or Google docs) that is difficult for someone to discover after the
+fact. An RFC _may_ give rise to more specific architectural _decisions_ for
+Tendermint, but those decisions must be recorded separately in [Architecture
+Decision Records (ADR)](./../architecture).
+
+As a rule of thumb, if you can articulate a specific question that needs to be
+answered, write an ADR. If you need to explore the topic and get input from
+others to know what questions need to be answered, an RFC may be appropriate.
+
+## RFC Content
+
+An RFC should provide:
+
+- A **changelog**, documenting when and how the RFC has changed.
+- An **abstract**, briefly summarizing the topic so the reader can quickly tell
+  whether it is relevant to their interest.
+- Any **background** a reader will need to understand and participate in the
+  substance of the discussion (links to other documents are fine here).
+- The **discussion**, the primary content of the document.
+
+The [rfc-template.md](./rfc-template.md) file includes placeholders for these
+sections.
+
+## Table of Contents
+
+- [RFC-000: P2P Roadmap](./rfc-000-p2p-roadmap.rst)
+- [RFC-001: Storage Engines](./rfc-001-storage-engine.rst)
+- [RFC-002: Interprocess Communication](./rfc-002-ipc-ecosystem.md)
+- [RFC-003: Performance Taxonomy](./rfc-003-performance-questions.md)
+- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
+- [RFC-005: Event System](./rfc-005-event-system.rst)
+
+
diff --git a/docs/rfc/rfc-000-p2p-roadmap.rst b/docs/rfc/rfc-000-p2p-roadmap.rst
new file mode 100644
index 000000000..dc9b54c7f
--- /dev/null
+++ b/docs/rfc/rfc-000-p2p-roadmap.rst
@@ -0,0 +1,316 @@
+====================
+RFC 000: P2P Roadmap
+====================
+
+Changelog
+---------
+
+- 2021-08-20: Completed initial draft and distributed via a gist
+- 2021-08-25: Migrated as an RFC and changed format
+
+Abstract
+--------
+
+This document discusses the future of peer network management in Tendermint, with
+a particular focus on features, semantics, and a proposed roadmap.
+Specifically, we consider libp2p as a toolkit for implementing some fundamentals.
+
+Background
+----------
+
+For the 0.35 release cycle the switching/routing layer of Tendermint was
+replaced. This work was done "in place," and produced a version of Tendermint
+that was backward-compatible and interoperable with previous versions of the
+software. While there are new p2p/peer management constructs in the new
+version (e.g. ``PeerManager`` and ``Router``), the main effect of this change
+was to simplify the ways that other components within Tendermint interacted with
+the peer management layer, and to make it possible for higher-level components
+(specifically the reactors) to be used and tested more independently.
+
+This refactoring, which was a major undertaking, was entirely necessary to
+enable areas for future development and iteration on this aspect of
+Tendermint. There are also a number of potential user-facing features that
+depend heavily on the p2p layer: additional transport protocols, transport
+compression, improved resilience to network partitions. These improvements to
+modularity, stability, and reliability of the p2p system will also make
+ongoing maintenance and feature development easier in the rest of Tendermint.
+
+Critique of Current Peer-to-Peer Infrastructure
+-----------------------------------------------
+
+The current (refactored) P2P stack is an improvement on the previous iteration
+(legacy), but as of 0.35, there remains room for improvement in the design and
+implementation of the P2P layer.
+
+Some limitations of the current stack include:
+
+- heavy reliance on buffering to avoid backups in the flow of components,
+  which is fragile to maintain, can lead to unexpected memory usage
+  patterns, and forces the routing layer to make decisions about when messages
+  should be discarded.
+
+- the current p2p stack relies on convention (rather than the compiler) to
+  enforce the API boundaries and conventions between reactors and the router,
+  making it very easy to write "wrong" reactor code or introduce a bad
+  dependency.
+
+- the current stack is probably more complex and difficult to maintain because
+  the legacy system must coexist with the new components in 0.35. When the
+  legacy stack is removed there are some simple changes that will become
+  possible and could reduce the complexity of the new system. (e.g. `#6598
+  `_.)
+
+- the current stack encapsulates a lot of information about peers, and makes it
+  difficult to expose that information to monitoring/observability tools. This
+  general opacity also makes it difficult to interact with the peer system
+  from other areas of the code base (e.g. tests, reactors).
+
+- the legacy stack provided some control to operators to force the system to
+  dial new peers or seed nodes or manipulate the topology of the system *in
+  situ*. The current stack can't easily provide this, and while the new stack
+  may have better behavior, it does leave operators' hands tied.
+
+Some of these issues will be resolved early in the 0.36 cycle, with the
+removal of the legacy components.
+
+The 0.36 release also provides the opportunity to make changes to the
+protocol, as the release will not be compatible with previous releases.
+
+Areas for Development
+---------------------
+
+These sections describe features that may make sense to include in a Phase 2 of
+a P2P project.
+
+Internal Message Passing
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Currently, there's no provision for intranode communication using the P2P
+layer, which means when two reactors need to interact with each other they
+have to have dependencies on each other's interfaces and initialization.
+Changing these interactions (e.g. transitions between blocksync and
+consensus) from procedure calls to message passing would remove this direct
+coupling.
+
+This is a relatively simple change and could be implemented with the following
+components:
+
+- a constant to represent "local" delivery as the ``To`` field on
+  ``p2p.Envelope``.
+
+- a special path for routing local messages that doesn't require message
+  serialization (protobuf marshaling/unmarshaling).
+
+Adding these semantics, particularly in conjunction with synchronous
+semantics, would provide a solution to the dependency graph problems currently
+present in the Tendermint codebase, which will simplify development and make
+it possible to isolate components for testing.
+
+Eventually, this will also make it possible to have a logical Tendermint node
+running in multiple processes or in a collection of containers, although the
+use case for this may be debatable.
+
+Synchronous Semantics (Paired Request/Response)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In the current system, all messages are sent with fire-and-forget semantics,
+and there's no coupling between a request sent via the p2p layer and a
+response. Paired request/response semantics would simplify the implementation
+of the state and block sync reactors, and make intra-node message passing more
+powerful.
+
+For some interactions, like gossiping transactions between the mempools of
+different nodes, fire-and-forget semantics make sense, but for other
+operations the missing link between requests/responses leads to either
+inefficiency when a node fails to respond or becomes unavailable, or code that
+is just difficult to follow.
+
+To support this kind of work, the protocol would need to accommodate some kind
+of request/response ID to allow identifying out-of-order responses over a
+single connection. Additionally, expanding the programming model of the
+``p2p.Channel`` to accommodate some kind of *future* or similar paradigm would
+make it viable to write reactor code without requiring the reactor developer
+to wrestle with lower-level concurrency constructs.
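+
+As a rough illustration, the following sketch shows the bookkeeping such
+pairing involves. The names here (``Envelope``, ``pendingRequests``) are
+hypothetical and do not correspond to the current ``p2p`` package API.
+
+.. code-block:: go
+
+   import "sync"
+
+   // Envelope is a hypothetical message wrapper carrying a request ID.
+   type Envelope struct {
+       RequestID uint64 // correlates a response with its request
+       Payload   []byte
+   }
+
+   type pendingRequests struct {
+       mu      sync.Mutex
+       nextID  uint64
+       waiting map[uint64]chan Envelope // outstanding request IDs
+   }
+
+   // send assigns the request an ID and returns a channel on which the
+   // caller can await the paired response (a "future").
+   func (p *pendingRequests) send(out chan<- Envelope, payload []byte) <-chan Envelope {
+       p.mu.Lock()
+       p.nextID++
+       id := p.nextID
+       ch := make(chan Envelope, 1)
+       p.waiting[id] = ch
+       p.mu.Unlock()
+
+       out <- Envelope{RequestID: id, Payload: payload}
+       return ch
+   }
+
+   // deliver routes an inbound envelope to the waiter for its request ID,
+   // tolerating out-of-order responses on a single connection.
+   func (p *pendingRequests) deliver(e Envelope) {
+       p.mu.Lock()
+       ch, ok := p.waiting[e.RequestID]
+       delete(p.waiting, e.RequestID)
+       p.mu.Unlock()
+       if ok {
+           ch <- e
+       }
+   }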
+
+
+Timeout Handling (QoS)
+~~~~~~~~~~~~~~~~~~~~~~
+
+Currently, all timeouts, buffering, and QoS features are handled at the router
+layer, and the reactors are implemented in ways that assume/require
+asynchronous operation. This both increases the required complexity at the
+routing layer, and means that misbehavior at the reactor level is difficult to
+detect or attribute. Additionally, the current system provides three main
+parameters to control quality of service:
+
+- buffer sizes for channels and queues.
+
+- priorities for channels
+
+- queue implementation details for shedding load.
+
+These end up being quite coarse controls, and changing the settings is
+difficult because, as the queues and channels can buffer large numbers of
+messages, it can be hard to see the impact of a given change, particularly
+in our extant test environment. In general, we should endeavor to:
+
+- set real timeouts, via contexts, on most message send operations, so that
+  senders rather than queues can be responsible for timeout
+  logic. Additionally, this will make it possible to avoid sending messages
+  during shutdown.
+
+- reduce (to the greatest extent possible) the amount of buffering in
+  channels and the queues, to more readily surface backpressure and reduce the
+  potential for buildup of stale messages.
+
+Stream Based Connection Handling
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Currently the transport layer is message based, which makes sense from a
+mental model of how the protocol works, but makes it more difficult to
+implement transports and connection types, as it forces a higher-level view of
+the connection and interaction, which makes it harder to support novel
+transport types and makes it more likely that message-based caching and rate
+limiting will be implemented at the transport layer rather than at a more
+appropriate level.
+
+The transport, then, would be responsible for negotiating the connection and the
+handshake and otherwise behave like a socket/file descriptor with ``Read`` and
+``Write`` methods.
+
+While this was included in the initial design for the new P2P layer, it may be
+obviated entirely if the transport and peer layer is replaced with libp2p,
+which is primarily stream based.
+
+Service Discovery
+~~~~~~~~~~~~~~~~~
+
+In the current system, Tendermint assumes that all nodes in a network are
+largely equivalent, and nodes tend to be "chatty" making many requests of
+large numbers of peers and waiting for peers to (hopefully) respond. While
+this works and has allowed Tendermint to get to a certain point, this both
+produces a theoretical scaling bottleneck and makes it harder to test and
+verify components of the system.
+
+In addition to a peer's identity and connection information, peers should be
+able to advertise a number of services or capabilities, and node operators or
+developers should be able to specify peer capability requirements (e.g. target
+at least some percentage of peers with a given capability).
+
+These capabilities may be useful in selecting peers to send messages to; it
+may also make sense to extend Tendermint's message addressing capability to allow
+reactors to send messages to groups of peers based on role rather than only
+allowing addressing to one or all peers.
+
+Having a good service discovery mechanism may pair well with the synchronous
+semantics (request/response) work, as it allows reactors to "make a request of
+a peer with a given capability and wait for the response," rather than forcing
+the reactors to track the capabilities or state of specific peers.
+
+Solutions
+---------
+
+Continued Homegrown Implementation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The current peer system is homegrown and is conceptually compatible with the
+needs of the project, and while there are limitations to the system, the p2p
+layer is not (currently as of 0.35) a major source of bugs or friction during
+development.
+
+However, the current implementation makes a number of allowances for
+interoperability, and there are a collection of iterative improvements that
+should be considered in the next couple of releases. To maintain the current
+implementation, upcoming work would include:
+
+- change the ``Transport`` mechanism to facilitate easier implementations.
+
+- implement different ``Transport`` handlers to be able to manage peer
+  connections using different protocols (e.g. QUIC, etc.)
+
+- entirely remove the constructs and implementations of the legacy peer
+  implementation.
+
+- establish and enforce clearer chains of responsibility for connection
+  establishment (e.g. handshaking, setup), which is currently shared between
+  three components.
+
+- report better metrics regarding the state of peers and network
+  connectivity, which are opaque outside of the system. This is constrained at
+  the moment as a side effect of the split responsibility for connection
+  establishment.
+
+- extend the PEX system to include service information so that nodes in the
+  network need not be homogeneous.
+
+While maintaining a bespoke peer management layer would seem to distract from
+development of core functionality, the truth is that (once the legacy code is
+removed) the scope of the peer layer is relatively small from a maintenance
+perspective, and having control at this layer might actually afford the
+project the ability to more rapidly iterate on some features.
+
+LibP2P
+~~~~~~
+
+LibP2P provides components that, approximately, account for the
+``PeerManager`` and ``Transport`` components of the current (new) P2P
+stack. The Go APIs seem reasonable, and being able to externalize the
+implementation details of peer and connection management seems like it could
+provide a lot of benefits, particularly in supporting a more active ecosystem.
+
+In general the API provides the kind of stream-based, multi-protocol,
+idiomatic baseline for implementing a peer layer. Additionally,
+because it handles peer exchange and connection management at a lower
+level, by using libp2p it'd be possible to remove a good deal of code in favor
+of just using libp2p. Having said that, Tendermint's P2P layer covers a
+greater scope (e.g. message routing to different peers) and that layer is
+something that Tendermint might want to retain.
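+
+For a concrete sense of the shape of the Go API, the following is a minimal
+sketch of constructing a libp2p host and registering a stream handler for a
+hypothetical Tendermint protocol ID. Constructor signatures and import paths
+vary across go-libp2p releases (newer releases drop the context parameter),
+so treat this as illustrative only.
+
+.. code-block:: go
+
+   package main
+
+   import (
+       "context"
+       "fmt"
+
+       "github.com/libp2p/go-libp2p"
+       "github.com/libp2p/go-libp2p-core/network"
+   )
+
+   func main() {
+       // Create a host with default transports, security, and muxers.
+       host, err := libp2p.New(context.Background())
+       if err != nil {
+           panic(err)
+       }
+       defer host.Close()
+
+       // The protocol ID below is hypothetical; libp2p hands each matching
+       // inbound stream to the registered handler.
+       host.SetStreamHandler("/tendermint/consensus/0.35", func(s network.Stream) {
+           defer s.Close()
+           // A Router-like component would decode envelopes from s here.
+       })
+
+       fmt.Println("listening as", host.ID(), "on", host.Addrs())
+       select {} // serve until killed
+   }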
+
+There are a number of unknowns that require more research, including how much
+of a peer database the Tendermint engine itself needs to maintain in order to
+support higher-level operations (consensus, statesync); it might be the case
+that our internal systems need to know much less about peers than currently
+specified. Similarly, the current system has a notion of peer scoring that
+cannot be communicated to libp2p, which may be fine, as this is only used to
+support peer exchange (PEX,) which would become a property of libp2p and would
+no longer be expressed in its current higher-level form.
+
+In general, the effort to switch to libp2p would involve:
+
+- timing it during an appropriate protocol-breaking window, as it doesn't seem
+  viable to support both libp2p *and* the current p2p protocol.
+
+- providing some in-memory testing network to support the use case that the
+  current ``p2p.MemoryNetwork`` provides.
+
+- re-homing the ``p2p.Router`` implementation on top of libp2p components to
+  be able to maintain the current reactor implementations.
+
+Open questions include:
+
+- how much local buffering should we be doing? We should figure out what
+  behavior libp2p provides for QoS-type functionality, and whether our
+  requirements mean that we should implement this on top of it ourselves.
+
+- if Tendermint was going to use libp2p, how would libp2p's stability
+  guarantees (protocol, etc.) impact/constrain Tendermint's stability
+  guarantees?
+
+- what kind of introspection does libp2p provide, and to what extent would
+  this change or constrain the kind of observability that Tendermint is able
+  to provide?
+
+- how do efforts to select "the best" (healthy, close, well-behaving, etc.)
+  peers work out if Tendermint is not maintaining a local peer database?
+
+- would adding additional higher-level semantics (internal message passing,
+  request/response pairs, service discovery, etc.) facilitate removing some of
+  the direct linkages between constructs/components in the system and reduce
+  the need for Tendermint nodes to maintain state about their peers?
+
+References
+----------
+
+- `Tracking Ticket for P2P Refactor Project `_
+- `ADR 61: P2P Refactor Scope <../architecture/adr-061-p2p-refactor-scope.md>`_
+- `ADR 62: P2P Architecture and Abstraction <../architecture/adr-062-p2p-architecture.md>`_
diff --git a/docs/rfc/rfc-001-storage-engine.rst b/docs/rfc/rfc-001-storage-engine.rst
new file mode 100644
index 000000000..560e8a8b3
--- /dev/null
+++ b/docs/rfc/rfc-001-storage-engine.rst
@@ -0,0 +1,179 @@
+===========================================
+RFC 001: Storage Engines and Database Layer
+===========================================
+
+Changelog
+---------
+
+- 2021-04-19: Initial Draft (gist)
+- 2021-09-02: Migrated to RFC folder, with some updates
+
+Abstract
+--------
+
+The aspect of Tendermint that's responsible for persistence and storage (often
+"the database" internally) represents a bottleneck in the architecture of the
+platform, which the 0.36 release presents a good opportunity to correct. The
+current storage engine layer provides a great deal of flexibility that is
+difficult for users to leverage or benefit from, while also making it harder
+for Tendermint Core developers to deliver improvements on the storage
+engine. This RFC discusses the possible improvements to this layer of the
+system.
+
+Background
+----------
+
+Tendermint has a very thin common wrapper that makes Tendermint itself
+(largely) agnostic to the data storage layer (within the realm of the popular
+key-value/embedded databases.) This flexibility is not particularly useful:
+the benefit of any specific database engine in the context of Tendermint is
+not well understood, and the maintenance burden for multiple backends is not
+commensurate with the benefit provided. Additionally, because the data
+storage layer is handled generically, and most tests run with an in-memory
+framework, it's difficult to take advantage of any higher-level features of a
+database engine.
+
+Ideally, developers within Tendermint will be able to interact with persisted
+data via an interface that functions, approximately, like an object store,
+and this storage interface will be able to accommodate all existing
+persistence workloads (e.g. block storage, local peer management information
+like the "address book", a crash-recovery log like the WAL.) In addition to
+providing a more ergonomic interface and new semantics, by selecting a single
+storage engine Tendermint can use the native durability and atomicity
+features of that engine and simplify its own implementations.
+
+Data Access Patterns
+~~~~~~~~~~~~~~~~~~~~
+
+Tendermint's data access patterns have the following characteristics:
+
+- aggregate data size often exceeds memory.
+
+- most data is rarely mutated after it's written (e.g. blocks), but small
+  amounts of working data are persisted by nodes and frequently mutated
+  (e.g. peer information, validator information.)
+
+- read patterns can be quite random.
+
+- crash resistance and crash recovery, provided by write-ahead logs (in
+  consensus, and potentially for the mempool,) should allow the system to
+  resume work after an unexpected shutdown.
+
+Project Goals
+~~~~~~~~~~~~~
+
+As we think about replacing the current persistence layer, we should consider
+the following high-level goals:
+
+- drop dependencies on storage engines that have a CGo dependency.
+
+- encapsulate data format and data storage from higher-level services
+  (e.g. reactors) within Tendermint.
+
+- select a storage engine that does not incur any additional operational
+  complexity (e.g. the database should be embedded.)
+
+- provide database semantics with sufficient ACID guarantees, snapshots, and
+  transactional support.
+
+Open Questions
+~~~~~~~~~~~~~~
+
+The following questions remain:
+
+- what kind of data-access concurrency does Tendermint require?
+
+- would Tendermint users (the SDK, etc.) benefit from some shared database
+  infrastructure?
+
+  - In earlier conversations it seemed as if the SDK had selected Badger and
+    RocksDB as its storage engines, and it might make sense to be able to
+    (optionally) pass a handle to a Badger instance between the libraries in
+    some cases.
+
+- what are typical data sizes, and what kinds of memory sizes can we expect
+  operators to be able to provide?
+
+- in addition to simple persistence, what kind of additional semantics would
+  Tendermint like to enjoy (e.g. transactional semantics, unique constraints,
+  indexes, in-place updates, etc.)?
+
+Decision Framework
+~~~~~~~~~~~~~~~~~~
+
+Given the constraint of removing the CGo dependency, the decision is between
+"badger" and "boltdb" (in the form of the etcd/CoreOS fork,) as the low-level
+storage engine.
On top of this, and somewhat orthogonally, we must also decide on the
+interface to the database and how the larger application will have to
+interact with the database layer. Users of the data layer shouldn't ever need
+to interact with raw byte slices from the database, and should mostly have
+the experience of interacting with Go types.
+
+Badger is more consistently developed and has a broader feature set than
+Bolt. At the same time, Badger is likely more memory intensive and may have
+more overhead in terms of open file handles, given its model. At first
+glance, Badger is the obvious choice: it's actively developed and it has a
+lot of features that could be useful. Bolt is not without some benefits: it's
+stable and maintained by the etcd folks, and its simpler model (a single
+memory-mapped file, etc.) may be easier to reason about.
+
+I propose that we consider the following specific questions about storage
+engines:
+
+- does Badger's evolving development, which may result in data file format
+  changes in the future, and could restrict our access to using the latest
+  version of the library between major upgrades, present a problem?
+
+- do we have goals/concerns about memory footprint that Badger may prevent us
+  from hitting, particularly as data sets grow over time?
+
+- what kind of additional tooling might we need/like to build (dump/restore,
+  etc.)?
+
+- do we want to run unit/integration tests against data files on disk rather
+  than relying exclusively on the memory database?
+
+Project Scope
+~~~~~~~~~~~~~
+
+This project will consist of the following aspects:
+
+- selecting a storage engine, and modifying the Tendermint codebase to
+  disallow any configuration of the storage engine outside of Tendermint
+  itself.
+
+- remove the dependency on the current tm-db interfaces and replace it with
+  some internalized, safe, and ergonomic interface for data persistence with
+  all required database semantics.
+
+- update core Tendermint code to use the new interface and data tools.
+
+Next Steps
+~~~~~~~~~~
+
+- circulate the RFC, and discuss options with appropriate stakeholders.
+
+- write a brief ADR to summarize decisions around technical decisions reached
+  during the RFC phase.
+
+References
+----------
+
+- `boltdb `_
+- `badger `_
+- `badgerdb overview `_
+- `boltdb overview `_
+- `boltdb vs badger `_
+- `bolthold `_
+- `badgerhold `_
+- `Pebble `_
+- `SDK Issue Regarding IAVL `_
+- `SDK Discussion about SMT/IAVL `_
+
+Discussion
+----------
+
+- All things being equal, my tendency would be to use badger, with badgerhold
+  (if that makes sense) for its ergonomics and indexing capabilities, which
+  will require some small selection of wrappers for better write transaction
+  support. This is a weakly held tendency/belief, and I think it would be
+  useful for the RFC process to build consensus (or not) around this basic
+  assumption.
diff --git a/docs/rfc/rfc-002-ipc-ecosystem.md b/docs/rfc/rfc-002-ipc-ecosystem.md
new file mode 100644
index 000000000..9b51beb7f
--- /dev/null
+++ b/docs/rfc/rfc-002-ipc-ecosystem.md
@@ -0,0 +1,420 @@
+# RFC 002: Interprocess Communication (IPC) in Tendermint
+
+## Changelog
+
+- 08-Sep-2021: Initial draft (@creachadair).
+
+
+## Abstract
+
+Communication in Tendermint among consensus nodes, applications, and operator
+tools uses a variety of message formats and transport mechanisms. In some
+cases there are multiple options. Having all these options complicates both
+the code and the developer experience, and hides bugs.
To support a more robust,
+trustworthy, and usable system, we should document which communication paths
+are essential, which could be removed or reduced in scope, and what we can
+improve for the most important use cases.
+
+This document proposes a variety of possible improvements of varying size and
+scope. Specific design proposals should get their own documentation.
+
+
+## Background
+
+The Tendermint state replication engine has a complex IPC footprint.
+
+1. Consensus nodes communicate with each other using a networked peer-to-peer
+   message-passing protocol.
+
+2. Consensus nodes communicate with the application whose state is being
+   replicated via the [Application BlockChain Interface (ABCI)][abci].
+
+3. Consensus nodes export a network-accessible [RPC service][rpc-service] to
+   support operations (bootstrapping, debugging) and synchronization of
+   [light clients][light-client]. This interface is also used by the
+   [`tendermint` CLI][tm-cli].
+
+4. Consensus nodes export a gRPC service exposing a subset of the methods of
+   the RPC service described by (3). This was intended to simplify the
+   implementation of tools that already use gRPC to communicate with an
+   application (via the Cosmos SDK), and want to also talk to the consensus
+   node without implementing yet another RPC protocol.
+
+   The gRPC interface to the consensus node has been deprecated and is slated
+   for removal in the forthcoming Tendermint v0.36 release.
+
+5. Consensus nodes may optionally communicate with a "remote signer" that
+   holds a validator key and can provide public keys and signatures to the
+   consensus node. One of the stated goals of this configuration is to allow
+   the signer to be run on a private network, separate from the consensus
+   node, so that a compromise of the consensus node from the public network
+   would be less likely to expose validator keys.
+
+## Discussion: Transport Mechanisms
+
+### Remote Signer Transport
+
+A remote signer communicates with the consensus node in one of two ways:
+
+1. "Raw": Using a TCP or Unix-domain socket which carries varint-prefixed
+   protocol buffer messages. In this mode, the consensus node is the server,
+   and the remote signer is the client.
+
+   This mode has been deprecated, and is intended to be removed.
+
+2. gRPC: This mode uses the same protobuf messages as "raw" mode, but uses a
+   standard encrypted gRPC HTTP/2 stub as the transport. In this mode, the
+   remote signer is the server and the consensus node is the client.
+
+
+### ABCI Transport
+
+In ABCI, the _application_ is the server, and the Tendermint consensus engine
+is the client. Most applications implement the server using the
+[Cosmos SDK][cosmos-sdk], which handles low-level details of the ABCI
+interaction and provides a higher-level interface to the rest of the
+application. The SDK is written in Go.
+
+Beneath the SDK, the application communicates with Tendermint core in one of
+two ways:
+
+- In-process direct calls (for applications written in Go and compiled against
+  the Tendermint code).
This is an optimization for the common case where an
+  application is written in Go, to save on the overhead of marshaling and
+  unmarshaling requests and responses within the same process:
+  [`abci/client/local_client.go`][local-client]
+
+- A custom remote procedure protocol built on wire-format protobuf messages
+  using a socket (the "socket protocol"): [`abci/server/socket_server.go`][socket-server]
+
+The SDK also provides a [gRPC service][sdk-grpc] accessible from outside the
+application, allowing clients to broadcast transactions to the network, look
+up transactions, and simulate transaction costs.
+
+
+### RPC Transport
+
+The consensus node RPC service allows callers to query consensus parameters
+(genesis data, transactions, commits), node status (network info, health
+checks), application state (`abci_query`, `abci_info`), mempool state, and
+other attributes of the node and its application. The service also provides
+methods allowing transactions and evidence to be injected ("broadcast") into
+the blockchain.
+
+The RPC service is exposed in several ways:
+
+- HTTP GET: Queries may be sent as URI parameters, with method names in the
+  path.
+
+- HTTP POST: Queries may be sent as JSON-RPC request messages in the body of
+  an HTTP POST request. The server uses a custom implementation of JSON-RPC
+  that is not fully compatible with the [JSON-RPC 2.0 spec][json-rpc], but
+  handles the common cases.
+
+- Websocket: Queries may be sent as JSON-RPC request messages via a websocket.
+  This transport uses more or less the same JSON-RPC plumbing as the HTTP POST
+  handler.
+
+  The websocket endpoint also includes three methods that are _only_ exported
+  via websocket, which appear to support event subscription.
+
+- gRPC: A subset of queries may be issued in protocol buffer format to the
+  gRPC interface described above under (4). As noted, this endpoint is
+  deprecated and will be removed in v0.36.
+
+### Opportunities for Simplification
+
+**Claim:** There are too many IPC mechanisms.
+
+The preponderance of ABCI usage is via the Cosmos SDK, which means the
+application and the consensus node are compiled together into a single binary,
+and the consensus node calls the ABCI methods of the application directly as
+Go functions.
+
+We also need a true IPC transport to support ABCI applications _not_ written
+in Go. Several known applications are written in Rust, for example, including
+[Anoma](https://github.com/anoma/anoma), Penumbra,
+[Oasis](https://github.com/oasisprotocol/oasis-core), Twilight, and
+[Nomic](https://github.com/nomic-io/nomic). Ideally we will have at most one
+such transport "built-in": More esoteric cases can be handled by a custom
+proxy. Pragmatically, gRPC is probably the right choice here.
+
+The primary consumers of the multi-headed "RPC service" today are the light
+client and the `tendermint` command-line client. There is probably some local
+use via curl, but I expect that is mostly ad hoc. Ethan reports that nodes are
+often configured with the ports to the RPC service blocked, which is good for
+security but complicates use by the light client.
+
+### Context: Remote Signer Issues
+
+Since the remote signer needs a secure communication channel to exchange keys
+and signatures, and is expected to run truly remotely from the node (i.e., on
+a separate physical server), there is not a whole lot we can do here.
We should
+finish the deprecation and removal of the "raw" socket protocol between the
+consensus node and remote signers, but the use of gRPC is appropriate.
+
+The main improvement we can make is to simplify the implementation quite a
+bit, once we no longer need to support both "raw" and gRPC transports.
+
+### Context: ABCI Issues
+
+In the original design of ABCI, the presumption was that all access to the
+application should be mediated by the consensus node. The idea is that outside
+access could change application state and corrupt the consensus process, which
+relies on the application to be deterministic. Of course, even without outside
+access an application could behave nondeterministically, but allowing other
+programs to send it requests was seen as courting trouble.
+
+Conversely, users noted that most of the time, tools written for a particular
+application don't want to talk to the consensus module directly. The
+application "owns" the state machine the consensus engine is replicating, so
+tools that care about application state should talk to the application.
+Otherwise, they would have to bake in knowledge about Tendermint (e.g., its
+interfaces and data structures) just because of the mediation.
+
+For clients to talk directly to the application, however, there is another
+concern: The consensus node is the ABCI _client_, so it is inconvenient for
+the application to "push" work into the consensus module via ABCI itself. The
+current implementation works around this by calling the consensus node's RPC
+service, which exposes an `ABCIQuery` kitchen-sink method that allows the
+application a way to poke ABCI messages in the other direction.
+
+Without this RPC method, you could work around this (at least in principle) by
+having the consensus module "poll" the application for work that needs to be
+done, but that has unsatisfactory implications for performance and robustness,
+as well as being harder to understand.
+
+There has apparently been discussion about trying to make a more bidirectional
+communication between the consensus node and the application, but this issue
+seems to still be unresolved.
+
+Another complication of ABCI is that it requires the application (server) to
+maintain [four separate connections][abci-conn]: One for "consensus" operations
+(BeginBlock, EndBlock, DeliverTx, Commit), one for "mempool" operations, one
+for "query" operations, and one for "snapshot" (state synchronization)
+operations. The rationale seems to have been that these groups of operations
+should be able to proceed concurrently with each other. In practice, it
+results in a very complex state management problem to coordinate state updates
+between the separate streams. While application authors in Go are mostly
+insulated from that complexity by the Cosmos SDK, the plumbing to maintain
+those separate streams is complicated, hard to understand, and we suspect it
+contains concurrency bugs and/or lock contention issues affecting performance
+that are subtle and difficult to pin down.
+
+Even without changing the semantics of any ABCI operations, this code could be
+made smaller and easier to debug by separating the management of concurrency
+and locking from the IPC transport: If all requests and responses are routed
+through one connection, the server can explicitly maintain priority queues for
+requests and responses, and make less-conservative decisions about when locks
+are (or aren't) required to synchronize state access.
With independent queues,
+the server must lock conservatively, and no optimistic scheduling is
+practical.
+
+This would be a tedious implementation change, but should be achievable
+without breaking any of the existing interfaces. More importantly, it could
+potentially address a lot of difficult concurrency and performance problems we
+currently see anecdotally but have difficulty isolating because of how
+intertwined these separate message streams are at runtime.
+
+TODO: Impact of ABCI++ for this topic?
+
+### Context: RPC Issues
+
+The RPC system serves several masters, and has a complex surface area. I
+believe there are some improvements that can be exposed by separating some of
+these concerns.
+
+The Tendermint light client currently uses the RPC service to look up blocks
+and transactions, and to forward ABCI queries to the application. The light
+client proxy uses the RPC service via a websocket. The Cosmos IBC relayer also
+uses the RPC service via websocket to watch for transaction events, and uses
+the `ABCIQuery` method to fetch information and proofs for posted
+transactions.
+
+Some work is already underway toward using P2P message passing rather than RPC
+to synchronize light client state with the rest of the network. IBC relaying,
+however, requires access to the event system, which is currently not
+accessible except via the RPC interface. Event subscription _could_ be exposed
+via P2P, but that is a larger project since it adds P2P communication load,
+and might thus have an impact on the performance of consensus.
+
+If event subscription can be moved into the P2P network, we could entirely
+remove the websocket transport, even for clients that still need access to the
+RPC service. Until then, we may still be able to reduce the scope of the
+websocket endpoint to _only_ event subscription, by moving uses of the RPC
+server as a proxy to ABCI over to the gRPC interface.
+
+Having the RPC server still makes sense for local bootstrapping and
+operations, but can be further simplified. Here are some specific proposals:
+
+- Remove the HTTP GET interface entirely.
+
+- Simplify JSON-RPC plumbing to remove unnecessary reflection and wrapping.
+
+- Remove the gRPC interface (this is already planned for v0.36).
+
+- Separate the websocket interface from the rest of the RPC service, and
+  restrict it to only event subscription.
+
+  Eventually we should try to remove the websocket interface entirely, but we
+  will need to revisit that (probably in a new RFC) once we've done some of
+  the easier things.
+
+These changes would preserve the ability of operators to issue queries with
+curl (but would require using JSON-RPC instead of URI parameters). That would
+be a little less user-friendly, but it affects a use case that should not be
+that prevalent.
+
+These changes would also preserve compatibility with existing JSON-RPC based
+code paths like the `tendermint` CLI and the light client (even ahead of
+further work to remove that dependency).
+
+**Design goal:** An operator should be able to disable non-local access to the
+RPC server on any node in the network without impairing the ability of the
+network to function for service of state replication, including light clients.
+
+**Design principle:** All communication required to implement and monitor the
+consensus network should use P2P, including the various synchronizations.
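+
+As a concrete illustration of the "simplify JSON-RPC plumbing" proposal
+above, dispatch could be table-driven rather than reflection-based. The
+following is a minimal sketch with hypothetical names; it is not the current
+implementation:
+
+```go
+package rpc
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+)
+
+// handlerFunc decodes its own parameters, so no reflection is required.
+type handlerFunc func(ctx context.Context, params json.RawMessage) (interface{}, error)
+
+// Server dispatches JSON-RPC methods through an explicit table.
+type Server struct {
+	methods map[string]handlerFunc
+}
+
+func NewServer() *Server { return &Server{methods: make(map[string]handlerFunc)} }
+
+func (s *Server) Register(name string, h handlerFunc) { s.methods[name] = h }
+
+// Dispatch looks up and invokes a method by name.
+func (s *Server) Dispatch(ctx context.Context, method string, params json.RawMessage) (interface{}, error) {
+	h, ok := s.methods[method]
+	if !ok {
+		return nil, fmt.Errorf("method %q not found", method)
+	}
+	return h(ctx, params)
+}
+
+// registerStatus shows a registration: each handler decodes typed params.
+func registerStatus(s *Server) {
+	s.Register("status", func(ctx context.Context, _ json.RawMessage) (interface{}, error) {
+		return map[string]interface{}{"synced": true}, nil // placeholder result
+	})
+}
+```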
+ +### Options for ABCI Transport + +The majority of current usage is in Go, and the majority of that is mediated by +the Cosmos SDK, which uses the "direct call" interface. There is probably some +opportunity to clean up the implementation of that code, notably by inverting +which interface is at the "top" of the abstraction stack (currently it acts +like an RPC interface, and escape-hatches into the direct call). However, this +general approach works fine and doesn't need to be fundamentally changed. + +For applications _not_ written in Go, the two remaining options are the +"socket" protocol (another variation on varint-prefixed protobuf messages over +an unstructured stream) and gRPC. It would be nice if we could get rid of one +of these to reduce (unneeded?) optionality. + +Since both the socket protocol and gRPC depend on protocol buffers, the +"socket" protocol is the most obvious choice to remove. While gRPC is more +complex, the set of languages that _have_ protobuf support but _lack_ gRPC +support is small. Moreover, gRPC is already widely used in the rest of the +ecosystem (including the Cosmos SDK). + +If some use case did arise later that can't work with gRPC, it would not be too +difficult for that application author to write a little proxy (in Go) that +bridges the convenient SDK APIs into a simpler protocol than gRPC. + +**Design principle:** It is better for an uncommon special case to carry the +burdens of its specialness, than to bake an escape hatch into the infrastructure. + +**Recommendation:** We should deprecate and remove the socket protocol. + +### Options for RPC Transport + +[ADR 057][adr-57] proposes using gRPC for the Tendermint RPC implementation. +This is still possible, but if we are able to simplify and decouple the +concerns as described above, I do not think it should be necessary. + +While JSON-RPC is not the best possible RPC protocol for all situations, it has +some advantages over gRPC for our domain. Specifically: + +- It is easy to call JSON-RPC manually from the command-line, which helps with + a common concern for the RPC service, local debugging and operations. + + Relatedly: JSON is relatively easy for humans to read and write, and it can + be easily copied and pasted to share sample queries and debugging results in + chat, issue comments, and so on. Ideally, the RPC service will not be used + for activities where the costs of a text protocol are important compared to + its legibility and manual usability benefits. + +- gRPC has an enormous dependency footprint for both clients and servers, and + many of the features it provides to support security and performance + (encryption, compression, streaming, etc.) are mostly irrelevant to local + use. Tendermint already needs to include a gRPC client for the remote signer, + but if we can avoid the need for a _client_ to depend on gRPC, that is a win + for usability. + +- If we intend to migrate light clients off RPC to use P2P entirely, there is + no advantage to forcing a temporary migration to gRPC along the way; and once + the light client is not dependent on the RPC service, the efficiency of the + protocol is much less important. + +- We can still get the benefits of generated data types using protocol buffers, even + without using gRPC: + + - Protobuf defines a standard JSON encoding for all message types so + languages with protobuf support do not need to worry about type mapping + oddities. 
+
+  - Using JSON means that even languages _without_ good protobuf support can
+    implement the protocol with a bit more work, and I expect this situation
+    to be rare.
+
+Even if a language lacks a good standard JSON-RPC mechanism, the protocol is
+lightweight and can be implemented by simple send/receive over TCP or
+Unix-domain sockets with no need for code generation, encryption, etc. gRPC
+uses a complex HTTP/2 based transport that is not easily replicated.
+
+### Future Work
+
+The background and proposals sketched above focus on the existing structure of
+Tendermint and improvements we can make in the short term. It is worthwhile to
+also consider options for longer-term broader changes to the IPC ecosystem.
+The following outlines some ideas at a high level:
+
+- **Consensus service:** Today, the application and the consensus node are
+  nominally connected only via ABCI. Tendermint was originally designed with
+  the assumption that all communication with the application should be
+  mediated by the consensus node. Based on further experience, however, the
+  design goal is now that the _application_ should be the mediator of
+  application state.
+
+  As noted above, however, ABCI is a client/server protocol, with the
+  application as the server. For outside clients that turns out to have been a
+  good choice, but it complicates the relationship between the application and
+  the consensus node: Previously transactions were entered via the node; now
+  they are entered via the app.
+
+  We have worked around this by using the Tendermint RPC service to give the
+  application a "back channel" to the consensus node, so that it can push
+  transactions back into the consensus network. But the RPC service exposes a
+  lot of other functionality, too, including event subscription, block and
+  transaction queries, and a lot of node status information.
+
+  Even if we can't easily "fix" the orientation of the ABCI relationship, we
+  could improve isolation by splitting out the parts of the RPC service that
+  the application needs as a back-channel, and sharing those _only_ with the
+  application. By defining a "consensus service", we could give the
+  application a way to talk back that is limited to only the capabilities it
+  needs. This approach has the benefit that we could do it without breaking
+  existing use, and if we later did "fix" the ABCI directionality, we could
+  drop the special case without disrupting the rest of the RPC interface.
+
+- **Event service:** Right now, the IBC relayer relies on the Tendermint RPC
+  service to provide a stream of block and transaction events, which it uses
+  to discover which transactions need relaying to other chains. While I think
+  that event subscription should eventually be handled via P2P, we could gain
+  some immediate benefit by splitting out event subscription from the rest of
+  the RPC service.
+
+  In this model, an event subscription service would be exposed on the public
+  network, but on a different endpoint. This would remove the need for the RPC
+  service to support the websocket protocol, and would allow operators to
+  isolate potentially sensitive status query results from the public network.
+
+  At the moment the relayers also use the RPC service to get block data for
+  synchronization, but work is already in progress to handle that concern via
+  the P2P layer. Once that's done, event subscription could be separated. A
+  minimal sketch of what such a standalone event service might look like
+  follows this list.
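+
+A minimal sketch of a split-out event subscription service, expressed as a Go
+interface (hypothetical; all names are illustrative):
+
+```go
+package eventsvc
+
+import "context"
+
+// Event is a placeholder for whatever representation the service exposes.
+type Event struct {
+	Type  string
+	Attrs map[string]string
+}
+
+// Subscription yields matching events until the context is canceled.
+type Subscription interface {
+	Next(ctx context.Context) (Event, error)
+}
+
+// Service is the entire surface a standalone event endpoint would need,
+// independent of the rest of the RPC interface.
+type Service interface {
+	// Subscribe registers a query (e.g. "tm.event='Tx'") and returns a
+	// subscription scoped to that query.
+	Subscribe(ctx context.Context, query string) (Subscription, error)
+}
+```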
+ +Separating parts of the existing RPC service is not without cost: It might +require additional connection endpoints, for example, though it is also not too +difficult for multiple otherwise-independent services to share a connection. + +In return, though, it would become easier to reduce transport options and for +operators to independently control access to sensitive data. Considering the +viability and implications of these ideas is beyond the scope of this RFC, but +they are documented here since they follow from the background we have already +discussed. + +## References + +[abci]: https://github.com/tendermint/spec/tree/95cf253b6df623066ff7cd4074a94e7a3f147c7a/spec/abci +[rpc-service]: https://docs.tendermint.com/master/rpc/ +[light-client]: https://docs.tendermint.com/master/tendermint-core/light-client.html +[tm-cli]: https://github.com/tendermint/tendermint/tree/master/cmd/tendermint +[cosmos-sdk]: https://github.com/cosmos/cosmos-sdk/ +[local-client]: https://github.com/tendermint/tendermint/blob/master/abci/client/local_client.go +[socket-server]: https://github.com/tendermint/tendermint/blob/master/abci/server/socket_server.go +[sdk-grpc]: https://pkg.go.dev/github.com/cosmos/cosmos-sdk/types/tx#ServiceServer +[json-rpc]: https://www.jsonrpc.org/specification +[abci-conn]: https://github.com/tendermint/spec/blob/master/spec/abci/apps.md#state +[adr-57]: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-057-RPC.md diff --git a/docs/rfc/rfc-003-performance-questions.md b/docs/rfc/rfc-003-performance-questions.md new file mode 100644 index 000000000..b8111ffa2 --- /dev/null +++ b/docs/rfc/rfc-003-performance-questions.md @@ -0,0 +1,283 @@ +# RFC 003: Taxonomy of potential performance issues in Tendermint + +## Changelog + +- 2021-09-02: Created initial draft (@wbanfield) +- 2021-09-14: Add discussion of the event system (@wbanfield) + +## Abstract + +This document discusses the various sources of performance issues in Tendermint and +attempts to clarify what work may be required to understand and address them. + +## Background + +Performance, loosely defined as the ability of a software process to perform its work +quickly and efficiently under load and within reasonable resource limits, is a frequent +topic of discussion in the Tendermint project. +To effectively address any issues with Tendermint performance we need to +categorize the various issues, understand their potential sources, and gauge their +impact on users. + +Categorizing the different known performance issues will allow us to discuss and fix them +more systematically. This document proposes a rough taxonomy of performance issues +and highlights areas where more research into potential performance problems is required. + +Understanding Tendermint's performance limitations will also be critically important +as we make changes to many of its subsystems. Performance is a central concern for +upcoming decisions regarding the `p2p` protocol, RPC message encoding and structure, +database usage and selection, and consensus protocol updates. + + +## Discussion + +This section attempts to delineate the different sections of Tendermint functionality +that are often cited as having performance issues. It raises questions and suggests +lines of inquiry that may be valuable for better understanding Tendermint's performance issues. + +As a note: We should avoid quickly adding many microbenchmarks or package level benchmarks. 
+
+These are prone to being worse than useless as they can obscure what _should_
+be focused on: performance of the system from the perspective of a user. We
+should, instead, tune performance with an eye towards user needs and the
+actions users take. These users comprise both operators of Tendermint chains
+and the people generating transactions for Tendermint chains. Both of these
+sets of users are largely aligned in wanting an end-to-end system that
+operates quickly and efficiently.
+
+REQUEST: The list below may be incomplete; if there are additional sections
+that are often cited as creating poor performance, please comment so that
+they may be included.
+
+### P2P
+
+#### Claim: Tendermint cannot scale to large numbers of nodes
+
+A complaint has been reported that Tendermint networks cannot scale to large
+numbers of nodes. The number of nodes a user reported as causing issues was
+in the thousands. We don't currently have evidence about the upper limit on
+the number of nodes that Tendermint's P2P stack can scale to.
+
+We need to more concretely understand the source of issues and determine what
+layer is causing a problem. It's possible that the P2P layer, in the absence
+of any reactors sending data, is perfectly capable of managing thousands of
+peer connections. For a reasonable networking and application setup, thousands
+of connections should not present any issue for the application.
+
+We need more data to understand the problem directly. We want to drive the
+popularity and adoption of Tendermint, and this will mean allowing for chains
+with more validators. We should follow up with users experiencing this issue.
+We may then want to add a series of metrics to the P2P layer to better
+understand the inefficiencies it produces.
+
+The following metrics can help us understand the sources of latency in the
+Tendermint P2P stack:
+
+* Number of messages sent and received per second
+* Time a message spends on the P2P layer's send and receive queues
+
+The following metrics exist and should be leveraged in addition to those
+added:
+
+* Number of peers a node is connected to
+* Number of bytes per channel sent and received from each peer
+
+### Sync
+
+#### Claim: Block Syncing is slow
+
+Bootstrapping a new node in a network to the height of the rest of the network
+is believed to take longer than users would like. Block sync requires fetching
+all of the blocks from peers and writing them to local disk for storage. A
+useful line of inquiry is understanding how quickly a perfectly tuned system
+_could_ fetch all of the state over a network, so that we understand how much
+overhead Tendermint actually adds.
+
+The operation is likely to be _incredibly_ dependent on the environment in
+which the node is being run. The factors that will influence syncing include:
+1. Number of peers that a syncing node may fetch from.
+2. Speed of the disk that a validator is writing to.
+3. Speed of the network connection between the peers the node is syncing from.
+
+We should calculate how quickly this operation _could possibly_ complete for
+common chains and nodes. For this calculation, we should assume that a node is
+reading at the line rate of the NIC and writing at full drive speed to its
+local storage; a back-of-envelope sketch follows below. Comparing this
+theoretical best case to the actual sync times observed by node operators will
+give us a good point of comparison for understanding how much overhead
+Tendermint incurs.
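+
+As an illustration of that calculation (all constants are assumed
+placeholders, not measurements of any real chain or hardware):
+
+```go
+package main
+
+import "fmt"
+
+func main() {
+	const (
+		chainSizeGB   = 500.0 // total block data to fetch (assumed)
+		nicGbps       = 1.0   // network line rate (assumed)
+		diskWriteMBps = 400.0 // sustained sequential write speed (assumed)
+	)
+	netSecs := chainSizeGB * 8 / nicGbps           // GB -> gigabits, over Gb/s
+	diskSecs := chainSizeGB * 1024 / diskWriteMBps // GB -> MB, over MB/s
+	// The slower of the two paths bounds the theoretical sync time.
+	floor := netSecs
+	if diskSecs > floor {
+		floor = diskSecs
+	}
+	fmt.Printf("network-bound: %.0fs, disk-bound: %.0fs, floor: %.1fh\n",
+		netSecs, diskSecs, floor/3600)
+}
+```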
+
+We should additionally add metrics to the blocksync operation to more clearly
+pinpoint slow operations. The following metrics should be added to the block
+syncing operation:
+
+* Time to fetch and validate each block
+* Time to execute a block
+* Blocks sync'd per unit time
+
+### Application
+
+Applications performing complex state transitions have the potential to
+bottleneck the Tendermint node.
+
+#### Claim: ABCI block delivery could cause slowdown
+
+ABCI delivers blocks in several methods: `BeginBlock`, `DeliverTx`,
+`EndBlock`, `Commit`.
+
+Tendermint delivers transactions one-by-one via the `DeliverTx` call. Most of
+the transaction delivery in Tendermint occurs asynchronously and therefore
+appears unlikely to form a bottleneck in ABCI.
+
+After delivering all transactions, Tendermint then calls the `Commit` ABCI
+method. Tendermint [locks all access to the mempool][abci-commit-description]
+while `Commit` proceeds. This means that an application that is slow to
+execute all of its transactions or finalize state during the `Commit` method
+will prevent any new transactions from being added to the mempool. Apps that
+are slow to commit will prevent consensus from proceeding to the next
+consensus height, since Tendermint cannot validate block proposals or produce
+block proposals without the AppHash obtained from the `Commit` method. We
+should add a metric for each step in the ABCI protocol to track the amount of
+time that a node spends communicating with the application at each step.
+
+#### Claim: ABCI serialization overhead causes slowdown
+
+The most common way to run a Tendermint application is using the Cosmos-SDK.
+The Cosmos-SDK runs the ABCI application within the same process as
+Tendermint. When an application is run in the same process as Tendermint, a
+serialization penalty is not paid. This is because the local ABCI client does
+not serialize method calls and instead passes the protobuf type through
+directly. This can be seen in [local_client.go][abci-local-client-code].
+
+Serialization and deserialization in the gRPC and socket protocol ABCI methods
+may cause slowdown. While these may cause issues, they are not part of the
+primary use case of Tendermint and do not necessarily need to be addressed at
+this time.
+
+### RPC
+
+#### Claim: The Query API is slow
+
+The query API locks a mutex across the ABCI connections. This causes consensus
+to slow during queries, as ABCI is no longer able to make progress. This is
+known to be causing issues in the cosmos-sdk and is being addressed
+[in the SDK][sdk-query-fix], but a more robust solution may be required.
+Adding metrics to each ABCI client connection and message as described in the
+Application section of this document would allow us to further introspect the
+issue here.
+
+#### Claim: RPC Serialization may cause slowdown
+
+The Tendermint RPC uses a modified version of JSON-RPC. This RPC powers the
+`broadcast_tx_*` methods, which are currently the critical methods for adding
+transactions to Tendermint. These methods are likely invoked quite frequently
+on popular networks. Being able to perform efficiently on this common and
+critical operation is very important. The current JSON-RPC implementation
+relies heavily on type introspection via reflection, which is known to be very
+slow in Go. We should therefore produce benchmarks of this method to determine
+how much overhead we are adding to what is likely to be a very common
+operation; a sketch of such a benchmark follows below.
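+
+A sketch of the kind of benchmark proposed here, using only the standard
+library; the request shape is an approximation of a JSON-RPC
+`broadcast_tx_sync` call, not the exact wire format:
+
+```go
+package rpc
+
+import (
+	"encoding/json"
+	"testing"
+)
+
+// rpcRequest approximates the JSON-RPC request envelope.
+type rpcRequest struct {
+	JSONRPC string          `json:"jsonrpc"`
+	ID      int             `json:"id"`
+	Method  string          `json:"method"`
+	Params  json.RawMessage `json:"params"`
+}
+
+var sample = []byte(`{"jsonrpc":"2.0","id":1,"method":"broadcast_tx_sync",` +
+	`"params":{"tx":"dGVzdCB0cmFuc2FjdGlvbg=="}}`)
+
+// BenchmarkDecodeBroadcastTx measures reflection-based decoding of a
+// representative request with encoding/json.
+func BenchmarkDecodeBroadcastTx(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		var req rpcRequest
+		if err := json.Unmarshal(sample, &req); err != nil {
+			b.Fatal(err)
+		}
+	}
+}
+```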
+
+The other JSON-RPC methods are much less critical to the core functionality
+of Tendermint. While there may be other points of performance consideration
+within the RPC, methods that do not receive high volumes of requests should
+not be prioritized.
+
+NOTE: Previous discussion of the RPC framework was done in [ADR 57][adr-57],
+and there is ongoing work to inspect and alter the JSON-RPC framework in
+[RFC 002][rfc-002]. Many of these RPC-related performance considerations can
+either wait until the RFC 002 work is done or be considered in concert with
+the in-flight changes to the JSON-RPC.
+
+### Protocol
+
+#### Claim: Gossiping messages is a slow process
+
+Currently, for any validator to successfully vote in a consensus _step_, it
+must receive votes from greater than 2/3 of the validators on the network. In
+many cases, it's preferable to receive as many votes as possible from correct
+validators.
+
+This produces a quadratic increase in messages that are communicated as more
+validators join the network. (Each of the N validators must communicate with
+all other N-1 validators).
+
+This large number of messages communicated per step has been identified as a
+bottleneck for the protocol, so it would be extremely valuable to gather data
+on how long it takes for popular chains with many validators to gather all
+votes within a step.
+
+Metrics that would improve visibility into this include:
+
+* Amount of time for a node to gather votes in a step.
+* Amount of time for a node to gather all block parts.
+* Number of votes each node sends to gossip (i.e. not its own votes, but votes
+it is transmitting for a peer).
+* Total number of votes each node receives (a node may receive duplicate votes,
+so understanding how frequently this occurs will be valuable in evaluating the
+performance of the gossip system).
+
+#### Claim: Hashing Txs causes slowdown in Tendermint
+
+Using a faster hash algorithm for Tx hashes is currently a point of discussion
+in Tendermint. Namely, it is being considered as part of the
+[modular hashing proposal][modular-hashing]. It is currently unknown if
+hashing transactions in the mempool forms a significant bottleneck. Although
+it does not appear to be documented as slow, there are a few open GitHub
+issues that indicate a possible user preference for a faster hashing
+algorithm, including [issue 2187][issue-2187] and [issue 2186][issue-2186].
+
+It is likely worth investigating what order of magnitude Tx hashing takes in
+comparison to other aspects of adding a Tx to the mempool. It is not currently
+clear if the rate of adding Txs to the mempool is a source of user pain. We
+should not endeavor to make large changes to consensus-critical components
+without first being certain that the change is highly valuable and impactful.
+
+### Digital Signatures
+
+#### Claim: Verification of digital signatures may cause slowdown in Tendermint
+
+Working with cryptographic signatures can be computationally expensive. The
+Cosmos Hub uses [ed25519 signatures][hub-signature]. The library performing
+signature verification on votes in Tendermint is
+[benchmarked][ed25519-bench] to be able to verify an `ed25519` signature in
+75μs on a decently fast CPU.
A validator in the Cosmos Hub performs
+3 sets of verifications on the signatures of the 140 validators in the Hub in
+a consensus round: during block verification, when verifying the prevotes,
+and when verifying the precommits. With no batching, this would be roughly
+`31.5ms` per round (3 × 140 × 75μs). It is quite unlikely, therefore, that
+this accounts for any serious amount of the ~7 seconds of block time per
+height in the Hub.
+
+This may cause slowdown when syncing, since the process needs to constantly
+verify signatures. It's possible that improved signature aggregation will lead
+to improved light client or other syncing performance. In general, a metric
+should be added to track block rate while blocksyncing.
+
+#### Claim: Our use of digital signatures in the consensus protocol contributes to performance issues
+
+Currently, Tendermint's digital signature verification requires that all
+validators receive all vote messages. Each validator must receive the complete
+digital signature along with the vote message that it corresponds to. This
+means that all N validators must receive messages from at least 2/3 of the N
+validators in each consensus round. Given the potential for oddly shaped
+network topologies and the expected variable network roundtrip times of a few
+hundred milliseconds in a blockchain, it is highly likely that this amount of
+gossiping is leading to a significant amount of the slowdown in the Cosmos Hub
+and in Tendermint consensus.
+
+### Tendermint Event System
+
+#### Claim: The event system is a bottleneck in Tendermint
+
+The Tendermint event system is used to communicate and store information about
+internal Tendermint execution. The system uses channels internally to send
+messages to different subscribers. Sending an event
+[blocks on the internal channel][event-send]. The default configuration is to
+[use an unbuffered channel for event publishes][event-buffer-capacity].
+Several consumers of the event system also use an unbuffered channel for
+reads. An example of this is the [event indexer][event-indexer-unbuffered],
+which takes an unbuffered subscription to the event system. The result is that
+these unbuffered readers can cause writes to the event system to block or slow
+down, depending on contention in the event system. This has implications for
+the consensus system, which [publishes events][consensus-event-send]. To
+better understand the performance of the event system, we should add metrics
+to track the timing of event sends. The following metrics would be a good
+start for tracking this performance (a sketch of such instrumentation follows
+the list):
+
+* Time in event send, labeled by event type
+* Time in event receive, labeled by subscriber
+* Event throughput, measured in events per unit time
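+
+A sketch of how such instrumentation might look, assuming
+`prometheus/client_golang`; the metric and label names are illustrative:
+
+```go
+package events
+
+import (
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+)
+
+var eventSendSeconds = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Name: "tendermint_event_send_seconds",
+		Help: "Time spent publishing an event, labeled by event type.",
+	},
+	[]string{"event_type"},
+)
+
+func init() { prometheus.MustRegister(eventSendSeconds) }
+
+// timedPublish wraps a publish function so that every send is timed and
+// recorded against the event type that produced it.
+func timedPublish(eventType string, publish func() error) error {
+	start := time.Now()
+	err := publish()
+	eventSendSeconds.WithLabelValues(eventType).Observe(time.Since(start).Seconds())
+	return err
+}
+```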
+
+### References
+
+[modular-hashing]: https://github.com/tendermint/tendermint/pull/6773
+[issue-2186]: https://github.com/tendermint/tendermint/issues/2186
+[issue-2187]: https://github.com/tendermint/tendermint/issues/2187
+[rfc-002]: https://github.com/tendermint/tendermint/pull/6913
+[adr-57]: https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-057-RPC.md
+[issue-1319]: https://github.com/tendermint/tendermint/issues/1319
+[abci-commit-description]: https://github.com/tendermint/spec/blob/master/spec/abci/apps.md#commit
+[abci-local-client-code]: https://github.com/tendermint/tendermint/blob/511bd3eb7f037855a793a27ff4c53c12f085b570/abci/client/local_client.go#L84
+[hub-signature]: https://github.com/cosmos/gaia/blob/0ecb6ed8a244d835807f1ced49217d54a9ca2070/docs/resources/genesis.md#consensus-parameters
+[ed25519-bench]: https://github.com/oasisprotocol/curve25519-voi/blob/d2e7fc59fe38c18ca990c84c4186cba2cc45b1f9/PERFORMANCE.md
+[event-send]: https://github.com/tendermint/tendermint/blob/5bd3b286a2b715737f6d6c33051b69061d38f8ef/libs/pubsub/pubsub.go#L338
+[event-buffer-capacity]: https://github.com/tendermint/tendermint/blob/5bd3b286a2b715737f6d6c33051b69061d38f8ef/types/event_bus.go#L14
+[event-indexer-unbuffered]: https://github.com/tendermint/tendermint/blob/5bd3b286a2b715737f6d6c33051b69061d38f8ef/state/indexer/indexer_service.go#L39
+[consensus-event-send]: https://github.com/tendermint/tendermint/blob/5bd3b286a2b715737f6d6c33051b69061d38f8ef/internal/consensus/state.go#L1573
+[sdk-query-fix]: https://github.com/cosmos/cosmos-sdk/pull/10045
diff --git a/docs/rfc/rfc-004-e2e-framework.rst b/docs/rfc/rfc-004-e2e-framework.rst
new file mode 100644
index 000000000..8508ca173
--- /dev/null
+++ b/docs/rfc/rfc-004-e2e-framework.rst
@@ -0,0 +1,213 @@
+========================================
+RFC 004: E2E Test Framework Enhancements
+========================================
+
+Changelog
+---------
+
+- 2021-09-14: started initial draft (@tychoish)
+
+Abstract
+--------
+
+This document discusses a series of improvements to the e2e test framework
+that we can consider during the next few releases to help boost confidence in
+Tendermint releases and improve developer efficiency.
+
+Background
+----------
+
+During the 0.35 release cycle, the E2E tests were a source of great value,
+helping to identify a number of bugs before release. At the same time, the
+tests were not consistently passing during this period, thereby reducing
+their value and forcing the core development team to allocate time and energy
+to maintaining and chasing down issues with the e2e tests and the test
+harness. The experience of this release cycle suggests a series of
+improvements to the test framework; this document attempts to capture them,
+along with their motivations and potential impact.
+
+Projects
+--------
+
+Flexible Workload Generation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Presently the e2e suite contains a single workload generation pattern, which
+exists simply to ensure that the test networks have some work during their
+runs. However, the shape and volume of the work are very consistent and
+deliberately gentle, to help ensure test reliability.
+
+We don't need a complex workload generation framework, but being able to have
+a few different workload shapes available for test networks, both generated
+and hand-crafted, would be useful.
+
+Workload patterns/configurations might include:
+
+- transaction targeting patterns (include light nodes, round robin, target
+  individual nodes)
+
+- variable transaction size over time.
+
+- transaction broadcast option (synchronously, checked, fire-and-forget,
+  mixed).
+
+- number of transactions to submit.
+
+- non-transaction workloads (evidence submission, query, event subscription.)
+
+Configurable Generator
+~~~~~~~~~~~~~~~~~~~~~~
+
+The nightly e2e suite is defined by the `testnet generator
+`_,
+and it's difficult to add dimensions or change the focus of the test suite in
+any way without modifying the implementation of the generator. If the
+generator were more configurable, potentially via a file rather than in
+the Go implementation, we could modify the focus of the test suite on the
+fly.
+
+Features that we might want to configure:
+
+- number of test networks to generate of various topologies, to improve
+  coverage of different configurations.
+
+- test application configurations (to modify the latency of ABCI calls, etc.)
+
+- size of test networks.
+
+- workload shape and behavior.
+
+- initial sync and catch-up configurations.
+
+The workload generator currently provides runtime options for limiting the
+generator to specific types of P2P stacks, and for generating multiple groups
+of test cases to support parallelism. The goal is to extend this pattern and
+avoid hardcoding the matrix of test cases in the generator code. Once the
+testnet configuration generation behavior is configurable at runtime,
+developers may be able to use the e2e framework to validate changes before
+landing them, rather than discovering broken e2e tests a day later.
+
+In addition to the autogenerated suite, it might make sense to maintain a
+small collection of hand-crafted cases that exercise configurations of
+concern, to run as part of the nightly (or less frequent) loop.
+
+Implementation Plan Structure
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As a development team, we should determine which features should impact the
+e2e testing early in the development cycle; if we intend to modify the e2e
+tests to exercise a feature, we should identify this early and begin the
+integration process as soon as possible.
+
+To facilitate this, we should adopt a practice whereby we exercise specific
+features that are currently under development more rigorously in the e2e
+suite, and then, as development stabilizes, reduce the number or weight of
+these features in the suite.
+
+As of 0.35 there are essentially two end-to-end tests: the suite of 64
+generated test networks, and the hand-crafted ``ci.toml`` test case. The
+generated test cases help provide systematic coverage, while the ``ci`` run
+provides coverage for a large number of features.
+
+Reduce Cycle Time
+~~~~~~~~~~~~~~~~~
+
+One of the barriers to leveraging the e2e framework, and one of the
+challenges in debugging failures, is that the cycle time of running a single
+test iteration is quite high: 5 minutes to build the docker image, plus the
+time to run the test or tests.
+
+There are a number of improvements and enhancements that can reduce the cycle
+time in practice:
+
+- reduce the amount of time required to build the docker image used in these
+  tests.
Without the dependency on CGo, the tendermint binaries could be
+  cross-compiled outside of the docker container and then injected into the
+  image, which would take better advantage of docker's native caching;
+  indeed, without CGo there would be no hard requirement for the e2e tests to
+  use docker at all.
+
+- support test parallelism. Because of the way the testnets are orchestrated,
+  a single system can really only run one network at a time. For executions
+  (local or remote) with more resources, there's no reason not to run a few
+  networks in parallel to reduce the feedback time.
+
+- prune testnet configurations that are unlikely to provide good signal, to
+  shorten the time to feedback.
+
+- apply some kind of tiered approach to test execution, to improve the
+  legibility of the test result. For example, order tests by the dependency
+  of their features, or run test networks without perturbations before
+  running that configuration with perturbations, to be able to isolate the
+  impact of specific features.
+
+- orchestrate the test harness directly from ``go test`` rather than via a
+  special harness and shell scripts, so e2e tests fit more natively into
+  developers' existing workflows.
+
+Many of these improvements, particularly reducing the build time, will also
+reduce the time to get feedback during automated builds.
+
+Deeper Insights
+~~~~~~~~~~~~~~~
+
+When a test network fails, it's incredibly difficult to understand *why* the
+network failed, as the current system provides very little insight into the
+system outside of the process logs. When a test network stalls or fails,
+developers should be able to quickly and easily get a sense of the state of
+the network and all nodes.
+
+Improvements in pursuit of this goal include functionality that would help
+node operators in production environments, by improving the quality and
+utility of the logging messages and other reported metrics, but would also
+provide some tools to collect and aggregate this data for developers in the
+context of test networks.
+
+- Interleave messages from all nodes in the network to be able to correlate
+  events during the test run.
+
+- Collect structured metrics of the system operation (CPU/MEM/IO) during the
+  test run, as well as from each tendermint/application process.
+
+- Build (simple) tools to be able to render and summarize the data collected
+  during the test run to answer basic questions about test outcome.
+
+Flexible Assertions
+~~~~~~~~~~~~~~~~~~~
+
+Currently, all assertions run for every test network. This makes the
+assertions pretty bland and the framework primarily useful for smoke testing;
+it might be useful to be able to write and run different tests for different
+configurations, which would allow us to test outside of the happy path.
+
+In general, our existing assertions occupy a fraction of the total test time,
+so adding a few extra test assertions would come at limited cost and could
+help build confidence.
+
+Additional Kinds of Testing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The existing e2e suite exercises networks of nodes that have a homogeneous
+tendermint version and a stable configuration, and that are expected to make
+progress. There are many other possible test configurations that may be
+interesting to engage with. These could include dimensions such as:
+
+- Multi-version testing to exercise our compatibility guarantees for networks
+  that might have different tendermint versions.
+
+Additional Kinds of Testing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The existing e2e suite exercises networks of nodes that have a homogeneous
+tendermint version and a stable configuration, and that are expected to
+make progress. There are many other possible test configurations that may
+be interesting to engage with. These could include dimensions such as:
+
+- Multi-version testing to exercise our compatibility guarantees for
+  networks that might have different tendermint versions.
+
+- As a flavor of multi-version testing, include upgrade testing, to build
+  confidence in migration code and procedures.
+
+- Additional test applications, particularly practical applications,
+  including some that use gaiad and/or the cosmos-sdk, as well as
+  test-only applications that simulate other kinds of applications
+  (e.g. variable application operation latency).
+
+- Tests of "non-viable" configurations that ensure that forbidden
+  combinations lead to halts.
+
+References
+----------
+
+- `ADR 66: End-to-End Testing <../architecture/adr-66-e2e-testing.md>`_
diff --git a/docs/rfc/rfc-005-event-system.rst b/docs/rfc/rfc-005-event-system.rst
new file mode 100644
index 000000000..b4a00b43d
--- /dev/null
+++ b/docs/rfc/rfc-005-event-system.rst
@@ -0,0 +1,122 @@
+=====================
+RFC 005: Event System
+=====================
+
+Changelog
+---------
+
+- 2021-09-17: Initial Draft (@tychoish)
+
+Abstract
+--------
+
+The event system within Tendermint, which supports a lot of core
+functionality, also represents a major infrastructural liability. As part
+of our upcoming review of the RPC interfaces and our ongoing thoughts
+about stability and performance, as well as the preparation for Tendermint
+1.0, we should revisit the design and implementation of the event system.
+This document discusses both the current state of the system and potential
+directions for future improvement.
+
+Background
+----------
+
+Current State of Events
+~~~~~~~~~~~~~~~~~~~~~~~
+
+The event system makes it possible for clients, both internal and
+external, to receive notifications of state replication events, such as
+new blocks, new transactions, and validator set changes, as well as
+intermediate events during consensus. Because the event system is very
+cross-cutting, the behavior and performance of the event publication and
+subscription system have a huge impact on all of Tendermint.
+
+The subscription service is exposed over the RPC interface, but also
+powers the indexing (e.g. to an external database), and is the mechanism
+by which `BroadcastTxCommit` is able to wait for transactions to land in
+a block.
+
+The current pubsub mechanism relies on a couple of buffered channels,
+primarily between all event creators and subscribers, but also for each
+subscription. The result of this design is that, in some situations, with
+the right collection of slow subscription consumers, the event system can
+put backpressure on the consensus state machine and message gossiping in
+the network, thereby causing nodes to lag.
+
+Improvements
+~~~~~~~~~~~~
+
+The current system relies on implicit, bounded queues built from buffered
+channels, and though it is threadsafe, it can force all activity within
+Tendermint to serialize, which does not need to happen. Additionally,
+timeouts for subscription consumers, related to the implementation of the
+RPC layer, may complicate the use of the system.
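+
+As a minimal illustration of the backpressure failure mode (this is not
+Tendermint's actual pubsub code), a send into a full buffered channel
+blocks the publisher, so a single slow subscriber stalls event
+distribution for everything upstream of it:
+
+.. code-block:: go
+
+   package main
+
+   import (
+       "fmt"
+       "time"
+   )
+
+   func main() {
+       // One subscriber with a small buffer, as in the current design.
+       sub := make(chan string, 1)
+
+       done := make(chan struct{})
+       go func() {
+           defer close(done)
+           // Slow subscriber: drains one event per 100ms.
+           for range sub {
+               time.Sleep(100 * time.Millisecond)
+           }
+       }()
+
+       // Publisher (standing in for the consensus state machine):
+       // once the buffer is full, each send blocks on the consumer.
+       for i := 0; i < 5; i++ {
+           start := time.Now()
+           sub <- fmt.Sprintf("event-%d", i)
+           fmt.Printf("published event-%d after %v\n", i, time.Since(start))
+       }
+       close(sub)
+       <-done
+   }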
+
+References
+~~~~~~~~~~
+
+- Legacy Implementation
+  - `publication of events `_
+  - `send operation `_
+  - `send loop `_
+- Related RFCs
+  - `RFC 002: IPC Ecosystem <./rfc-002-ipc-ecosystem.md>`_
+  - `RFC 003: Performance Questions <./rfc-003-performance-questions.md>`_
+
+Discussion
+----------
+
+Changes to Published Events
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+As part of this process, the Tendermint team should do a study of the
+existing event types and ensure that there are viable production use cases
+for subscriptions to all event types. Instinctively it seems plausible
+that some of the events may not be usable outside of tendermint
+(e.g. ``TimeoutWait`` or ``NewRoundStep``), and it might make sense to
+remove them. Certainly, it would be good to make sure that we don't
+maintain infrastructure for unused or un-useful messages indefinitely.
+
+Blocking Subscription
+~~~~~~~~~~~~~~~~~~~~~
+
+The blocking subscription mechanism makes it possible to have *send*
+operations into the subscription channel be un-buffered (the event
+processing channel is still buffered). In the blocking case, events from
+one subscription can block the processing of that event for other,
+non-blocking subscriptions. The main use case for blocking subscriptions,
+it seems, is ensuring that a transaction has been committed to a block for
+``BroadcastTxCommit``. Removing blocking subscriptions entirely, and
+potentially finding another way to implement ``BroadcastTxCommit``, could
+lead to important simplifications and improvements to throughput without
+requiring large changes.
+
+Subscription Identification
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Before `#6386 `_, all
+subscriptions were identified by the combination of a client ID and a
+query. With that change, it became possible to identify any subscription
+given only an ID, but compatibility with the legacy identification scheme
+means that there's a good deal of legacy code, as well as client-side
+efficiency, that could be improved.
+
+Pubsub Changes
+~~~~~~~~~~~~~~
+
+The pubsub core should be implemented in a way that removes the
+possibility that backpressure from the event system impacts the core
+system, *or* that one subscription impacts the behavior of another part
+of the system. Additionally, because the current system is implemented
+entirely in terms of a collection of buffered channels, the event system
+(and large numbers of subscriptions) can be a source of memory pressure.
+
+These changes could include:
+
+- explicit cancellation and timeouts propagated from callers (e.g. RPC
+  endpoints); this should be done using contexts.
+
+- a subscription system that is able to spill to disk to avoid putting
+  memory pressure on the core behavior of the node (consensus, gossip).
+
+- subscriptions implemented as cursors rather than channels, with either
+  condition variables to simulate the existing "push" API or a client-side
+  iterator API with some kind of long-polling interface (sketched below).
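+
+The following sketch shows one possible shape for the cursor approach; it
+is illustrative only, not a proposed API. Publishers only append to a log
+and never block on subscribers, while each consumer pulls events at its
+own pace through its cursor:
+
+.. code-block:: go
+
+   package pubsub
+
+   import "sync"
+
+   // Log is an append-only event log; subscribers hold cursors into
+   // it rather than channels. A real implementation would bound
+   // memory by truncating or spilling to disk.
+   type Log struct {
+       mu     sync.Mutex
+       cond   *sync.Cond
+       events []string
+   }
+
+   func NewLog() *Log {
+       l := &Log{}
+       l.cond = sync.NewCond(&l.mu)
+       return l
+   }
+
+   // Publish appends an event; it never blocks on subscribers.
+   func (l *Log) Publish(ev string) {
+       l.mu.Lock()
+       defer l.mu.Unlock()
+       l.events = append(l.events, ev)
+       l.cond.Broadcast()
+   }
+
+   // Cursor is a subscriber's position in the log.
+   type Cursor struct {
+       log *Log
+       pos int
+   }
+
+   func (l *Log) Subscribe() *Cursor { return &Cursor{log: l} }
+
+   // Next blocks until an event past the cursor is available, then
+   // returns it and advances; a slow consumer only delays itself.
+   func (c *Cursor) Next() string {
+       c.log.mu.Lock()
+       defer c.log.mu.Unlock()
+       for c.pos >= len(c.log.events) {
+           c.log.cond.Wait()
+       }
+       ev := c.log.events[c.pos]
+       c.pos++
+       return ev
+   }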
diff --git a/docs/rfc/rfc-template.md b/docs/rfc/rfc-template.md
new file mode 100644
index 000000000..b3f404775
--- /dev/null
+++ b/docs/rfc/rfc-template.md
@@ -0,0 +1,35 @@
+# RFC {RFC-NUMBER}: {TITLE}
+
+## Changelog
+
+- {date}: {changelog}
+
+## Abstract
+
+> A brief high-level synopsis of the topic of discussion for this RFC,
+> ideally just a few sentences. This should help the reader quickly decide
+> whether the rest of the discussion is relevant to their interest.
+
+## Background
+
+> Any context or orientation needed for a reader to understand and
+> participate in the substance of the Discussion. If necessary, this
+> section may include links to other documentation or sources rather than
+> restating existing material, but should provide enough detail that the
+> reader can tell what they need to read to be up-to-date.
+
+### References
+
+> Links to external materials needed to follow the discussion may be added
+> here.
+>
+> In addition, if the discussion in a request for comments leads to any
+> design decisions, it may be helpful to add links to the ADR documents
+> here after the discussion has settled.
+
+## Discussion
+
+> This section contains the core of the discussion.
+>
+> There is no fixed format for this section, but ideally it should be
+> updated before merging to reflect any discussion that took place on the
+> PR that made those changes.
diff --git a/docs/roadmap/roadmap.md b/docs/roadmap/roadmap.md
new file mode 100644
index 000000000..e6e5a32fe
--- /dev/null
+++ b/docs/roadmap/roadmap.md
@@ -0,0 +1,100 @@
+---
+order: false
+parent:
+  title: Roadmap
+  order: 7
+---
+
+# Tendermint Roadmap
+
+*Last Updated: Friday 8 October 2021*
+
+This document endeavours to inform the wider Tendermint community about development plans and priorities for Tendermint Core, and when we expect features to be delivered. It is intended to broadly inform all users of Tendermint, including application developers, node operators, integrators, and the engineering and research teams.
+
+Anyone wishing to propose work to be a part of this roadmap should do so by opening an [issue](https://github.com/tendermint/spec/issues/new/choose) in the spec. Bug reports and other implementation concerns should be brought up in the [core repository](https://github.com/tendermint/tendermint).
+
+This roadmap should be read as a high-level guide to plans and priorities, rather than a commitment to schedules and deliverables. Features earlier on the roadmap will generally be more specific and detailed than those later on. We will update this document periodically to reflect the current status.
+
+The upgrades are split into two components: **Epics**, the features that define a release and in large part dictate the timing of releases; and **minors**, features of smaller scale and lower priority that could land in neighboring releases.
+
+## V0.35 (completed Q3 2021)
+
+### Prioritized Mempool
+
+Transactions were previously added to blocks in the order in which they arrived in the mempool. Adding a priority field via `CheckTx` gives applications more control over which transactions make it into a block. This is important in the presence of transaction fees. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-067-mempool-refactor.md)
+
+### Refactor of the P2P Framework
+
+The Tendermint P2P system is undergoing a large redesign to improve its performance and reliability. The first phase of this redesign is included in 0.35. This phase cleans and decouples abstractions, improves peer lifecycle management and peer address handling, and enables pluggable transports. It is implemented to be protocol-compatible with the previous implementation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-062-p2p-architecture.md)
+
+### State Sync Improvements
+
+Following the initial version of state sync, several improvements have been made. These include the addition of [Reverse Sync](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-068-reverse-sync.md) needed for evidence handling, the introduction of a [P2P State Provider](https://github.com/tendermint/tendermint/pull/6807) as an alternative to RPC endpoints, new configuration parameters to adjust throughput, and several bug fixes.
+
+### Custom event indexing + PSQL Indexer
+
+Added a new `EventSink` interface to allow alternatives to Tendermint's proprietary transaction indexer. We also added a PostgreSQL Indexer implementation, allowing rich SQL-based index queries. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-065-custom-event-indexing.md)
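+
+As a rough sketch of the shape of this extension point (the types below are placeholders; see ADR 065 and the codebase for the actual interface definition), an event sink receives indexing calls that a backend such as PostgreSQL can implement:
+
+```go
+package indexer
+
+// Event and TxResult are placeholders standing in for the real types.
+type Event struct {
+	Type       string
+	Attributes map[string]string
+}
+
+type TxResult struct {
+	Height int64
+	Index  uint32
+	Events []Event
+}
+
+// EventSink is an illustrative sketch: a PostgreSQL-backed
+// implementation would translate these calls into INSERTs against a
+// relational schema, enabling rich SQL queries.
+type EventSink interface {
+	IndexBlockEvents(height int64, events []Event) error
+	IndexTxEvents(results []TxResult) error
+}
+```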
+
+### Minor Works
+
+- Several Go packages were reorganized to make the distinction between public APIs and implementation details more clear.
+- Block indexer to index begin-block and end-block events. [More](https://github.com/tendermint/tendermint/pull/6226)
+- Block, state, evidence, and light storage keys were reworked to preserve lexicographic order. This change requires a database migration. [More](https://github.com/tendermint/tendermint/pull/5771)
+- Introduction of Tendermint modes. Part of this change includes the ability to run a separate seed node that runs only the PEX reactor. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-052-tendermint-mode.md)
+
+## V0.36 (expected Q1 2022)
+
+### ABCI++
+
+An overhaul of the existing interface between the application and consensus, to give the application more control over block construction. ABCI++ adds new hooks allowing modification of transactions before they get into a block, verification of a block before voting, injection of signed information into votes, and more compact delivery of blocks after agreement (to allow for concurrent execution). [More](https://github.com/tendermint/spec/blob/master/rfc/004-abci%2B%2B.md)
+
+### Proposer-Based Timestamps
+
+Proposer-based timestamps are a replacement of [BFT time](https://docs.tendermint.com/master/spec/consensus/bft-time.html), whereby the proposer chooses a timestamp and validators vote on the block only if the timestamp is considered *timely*. This increases reliance on an accurate local clock, but in exchange makes block time more reliable and resistant to faults. This has important use cases in light clients, IBC relayers, CosmosHub inflation, and enabling signature aggregation. [More](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-071-proposer-based-timestamps.md)
+
+### Soft Upgrades
+
+We are working on a suite of tools and patterns to make it easier for both node operators and application developers to quickly and safely upgrade to newer versions of Tendermint. [More](https://github.com/tendermint/spec/pull/222)
+
+### Minor Works
+
+- Remove the "legacy" P2P framework, and clean up the P2P package. [More](https://github.com/tendermint/tendermint/issues/5670)
+- Remove the global mutex from the local ABCI client to enable application-controlled concurrency. [More](https://github.com/tendermint/tendermint/issues/7073)
+- Enable P2P support for light clients.
+- Node orchestration of services + node initialization and composability.
+- Remove redundancy in several data structures. Remove unused components such as the block sync v2 reactor, gRPC in the RPC layer, and the socket-based remote signer.
+- Improve node visibility by introducing more metrics.
+
+## V0.37 (expected Q3 2022)
+
+### Complete P2P Refactor
+
+Finish the final phase of the P2P system. Ongoing research and planning is taking place to decide whether to adopt [libp2p](https://libp2p.io/), alternative transports to `MConn` such as [QUIC](https://en.wikipedia.org/wiki/QUIC), and handshake/authentication protocols such as [Noise](https://noiseprotocol.org/). We are also researching more advanced gossiping techniques.
+
+### Streamline Storage Engine
+
+Tendermint currently has an abstraction to allow support for multiple database backends. This generality incurs maintenance overhead and interferes with application-specific optimizations that Tendermint could use (ACID guarantees, etc.). We plan to converge on a single database and streamline the Tendermint storage engine. [More](https://github.com/tendermint/tendermint/pull/6897)
+
+### Evaluate Interprocess Communication
+
+Tendermint nodes currently have multiple areas of communication with other processes (ABCI, remote-signer, P2P, JSONRPC, websockets, and events, as examples). Many of these have multiple implementations where a single one would suffice. Consolidate and clean up IPC. [More](https://github.com/tendermint/tendermint/blob/master/docs/rfc/rfc-002-ipc-ecosystem.md)
+
+### Minor Works
+
+- Amnesia attack handling. [More](https://github.com/tendermint/tendermint/issues/5270)
+- Remove / Update Consensus WAL. [More](https://github.com/tendermint/tendermint/issues/6397)
+- Signature Aggregation. [More](https://github.com/tendermint/tendermint/issues/1319)
+- Remove gogoproto dependency. [More](https://github.com/tendermint/tendermint/issues/5446)
+
+## V1.0 (expected Q4 2022)
+
+V1.0 has the same feature set as V0.37 but with a focus on testing, protocol correctness, and minor tweaks to ensure a stable product. Such work might include extending the [consensus testing framework](https://github.com/tendermint/tendermint/issues/5920), the use of canary/long-lived testnets, and greater integration testing.
+
+## Post 1.0 Work
+
+- Improved block propagation with erasure coding and/or compact blocks. [More](https://github.com/tendermint/spec/issues/347)
+- Consensus engine refactor
+- Bidirectional ABCI
+- Randomized Leader Election
+- ZK proofs / other cryptographic primitives
+- Multichain Tendermint
diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md
index fb359a08b..f83349db2 100644
--- a/docs/tendermint-core/README.md
+++ b/docs/tendermint-core/README.md
@@ -1,7 +1,7 @@
 ---
 order: 1
 parent:
-  title: System
+  title: Understanding Tendermint
   order: 5
 ---
@@ -10,7 +10,6 @@ parent:
 This section dives into the internals of Go-Tendermint.
 
 - [Using Tendermint](./using-tendermint.md)
-- [Running in Production](./running-in-production.md)
 - [Subscribing to events](./subscription.md)
 - [Block Structure](./block-structure.md)
 - [RPC](./rpc.md)
@@ -18,3 +17,5 @@ This section dives into the internals of Go-Tendermint.
 - [State Sync](./state-sync.md)
 - [Mempool](./mempool.md)
 - [Light Client](./light-client.md)
+
+For full specifications refer to the [spec repo](https://github.com/tendermint/spec).
\ No newline at end of file
diff --git a/docs/tendermint-core/block-sync.md b/docs/tendermint-core/block-sync.md
index 9d362424f..43e849fcc 100644
--- a/docs/tendermint-core/block-sync.md
+++ b/docs/tendermint-core/block-sync.md
@@ -17,9 +17,9 @@ consensus gossip protocol.
 
 ## Using Block Sync
 
-To support faster syncing, Tendermint offers a `fast-sync` mode, which
+To support faster syncing, Tendermint offers a `blocksync` mode, which
 is enabled by default, and can be toggled in the `config.toml` or via
-`--fast_sync=false`.
+`--blocksync.enable=false`.
 
 In this mode, the Tendermint daemon will sync hundreds of times faster
 than if it used the real-time consensus process. Once caught up, the
@@ -29,18 +29,23 @@ has at least one peer and it's height is at least as high as the max
 reported peer height. See [the IsCaughtUp
 method](https://github.com/tendermint/tendermint/blob/b467515719e686e4678e6da4e102f32a491b85a0/blockchain/pool.go#L128).
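+
+For example, an operator can check whether a local node is still block
+syncing by reading the `catching_up` field from the `/status` RPC endpoint
+(this sketch assumes the RPC server is listening on the default
+`localhost:26657` address):
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"net/http"
+)
+
+// statusResult captures only the field we need from /status.
+type statusResult struct {
+	Result struct {
+		SyncInfo struct {
+			CatchingUp bool `json:"catching_up"`
+		} `json:"sync_info"`
+	} `json:"result"`
+}
+
+func main() {
+	resp, err := http.Get("http://localhost:26657/status")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	var st statusResult
+	if err := json.NewDecoder(resp.Body).Decode(&st); err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println("catching up:", st.Result.SyncInfo.CatchingUp)
+}
+```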
-Note: There are two versions of Block Sync. We recommend using v0 as v2 is still in beta.
+Note: There are multiple versions of Block Sync. Please use v0; the other versions are no longer supported.
 
 If you would like to use a different version you can do so by changing the
 version in the `config.toml`:
 
 ```toml
 #######################################################
 ###       Block Sync Configuration Connections      ###
 #######################################################
-[fastsync]
+[blocksync]
+
+# If this node is many blocks behind the tip of the chain, BlockSync
+# allows it to catch up quickly by downloading blocks in parallel
+# and verifying their commits
+enable = true
 
 # Block Sync version to use:
-# 1) "v0" (default) - the legacy Block Sync implementation
-# 2) "v2" - complete redesign of v0, optimized for testability & readability
+# 1) "v0" (default) - the standard Block Sync implementation
+# 2) "v2" - DEPRECATED, please use v0
 version = "v0"
 ```
@@ -55,4 +60,4 @@ the network best height, it will switches to the state sync mechanism and then e
 another event for exposing the fast-sync `complete` status and the state `height`.
 
 The user can query the events by subscribing `EventQueryBlockSyncStatus`
-Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
\ No newline at end of file
+Please check [types](https://pkg.go.dev/github.com/tendermint/tendermint/types?utm_source=godoc#pkg-constants) for the details.
diff --git a/docs/tendermint-core/using-tendermint.md b/docs/tendermint-core/using-tendermint.md
index bbcdd0370..4b8a26bad 100644
--- a/docs/tendermint-core/using-tendermint.md
+++ b/docs/tendermint-core/using-tendermint.md
@@ -185,51 +185,65 @@ the argument name and use `_` as a placeholder.
 
 ### Formatting
 
-The following nuances when sending/formatting transactions should be
-taken into account:
+When sending transactions to the RPC interface, the following formatting
+rules must be followed:
 
-With `GET`:
+Using `GET` (with parameters in the URL):
 
-To send a UTF8 string byte array, quote the value of the tx parameter:
+To send a UTF8 string as transaction data, enclose the value of the `tx`
+parameter in double quotes:
 
 ```sh
 curl 'http://localhost:26657/broadcast_tx_commit?tx="hello"'
 ```
 
-which sends a 5 byte transaction: "h e l l o" \[68 65 6c 6c 6f\].
+which sends a 5-byte transaction: "h e l l o" \[68 65 6c 6c 6f\].
 
-Note the URL must be wrapped with single quotes, else bash will ignore
-the double quotes. To avoid the single quotes, escape the double quotes:
+Note that the URL in this example is enclosed in single quotes to prevent
+the shell from interpreting the double quotes. Alternatively, you may
+escape the double quotes with backslashes:
 
 ```sh
 curl http://localhost:26657/broadcast_tx_commit?tx=\"hello\"
 ```
 
-Using a special character:
+The double-quoted format works for multibyte characters, as long as they
+are valid UTF8, for example:
 
 ```sh
 curl 'http://localhost:26657/broadcast_tx_commit?tx="€5"'
 ```
 
-sends a 4 byte transaction: "€5" (UTF8) \[e2 82 ac 35\].
+sends a 4-byte transaction: "€5" (UTF8) \[e2 82 ac 35\].
 
-To send as raw hex, omit quotes AND prefix the hex string with `0x`:
+Arbitrary (non-UTF8) transaction data may also be encoded as a string of
+hexadecimal digits (2 digits per byte).
+To do this, omit the quotation marks and prefix the hex string with `0x`:
 
 ```sh
-curl http://localhost:26657/broadcast_tx_commit?tx=0x01020304
+curl http://localhost:26657/broadcast_tx_commit?tx=0x68656C6C6F
 ```
 
-which sends a 4 byte transaction: \[01 02 03 04\].
+which sends the 5-byte transaction: \[68 65 6c 6c 6f\].
 
-With `POST` (using `json`), the raw hex must be `base64` encoded:
+Using `POST` (with parameters in JSON), the transaction data is sent as a
+JSON string in base64 encoding:
 
 ```sh
-curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'content-type:text/plain;' http://localhost:26657
+curl http://localhost:26657 -H 'Content-Type: application/json' --data-binary '{
+  "jsonrpc": "2.0",
+  "id": "anything",
+  "method": "broadcast_tx_commit",
+  "params": {
+    "tx": "aGVsbG8="
+  }
+}'
 ```
 
-which sends the same 4 byte transaction: \[01 02 03 04\].
+which sends the same 5-byte transaction: \[68 65 6c 6c 6f\].
 
-Note that raw hex cannot be used in `POST` transactions.
+Note that the hexadecimal encoding of transaction data is _not_ supported
+in JSON (`POST`) requests.
 
 ## Reset
diff --git a/docs/tools/README.md b/docs/tools/README.md
index 3e87a2ea1..5d778f470 100644
--- a/docs/tools/README.md
+++ b/docs/tools/README.md
@@ -27,3 +27,11 @@ testing Tendermint networks.
 
 This repository contains various different configurations of test networks
 for, and relating to, Tendermint.
+
+Use [Docker Compose](./docker-compose.md) to spin up Tendermint testnets on
+your local machine.
+
+Use [Terraform and Ansible](./terraform-and-ansible.md) to deploy Tendermint
+testnets to the cloud.
+
+See the `tendermint testnet --help` command for more help initializing
+testnets.
diff --git a/docs/tools/debugging/README.md b/docs/tools/debugging/README.md
index 2932f6e86..053b43624 100644
--- a/docs/tools/debugging/README.md
+++ b/docs/tools/debugging/README.md
@@ -62,3 +62,30 @@ given destination directory. Each archive will contain:
 
 Note: goroutine.out and heap.out will only be written if a profile address is
 provided and is operational. This command is blocking and will log any error.
+
+## Tendermint Inspect
+
+Tendermint includes an `inspect` command for querying Tendermint's state store
+and block store over Tendermint RPC.
+
+When the Tendermint consensus engine detects inconsistent state, it will crash
+the entire Tendermint process.
+While in this inconsistent state, a node running Tendermint's consensus engine
+will not start up.
+The `inspect` command runs only a subset of Tendermint's RPC endpoints for
+querying the block store and state store.
+`inspect` allows operators to query a read-only view of the state store.
+`inspect` does not run the consensus engine at all and can therefore be used
+to debug processes that have crashed due to inconsistent state.
+
+To start the `inspect` process, run
+
+```bash
+tendermint inspect
+```
+
+### RPC endpoints
+
+The list of available RPC endpoints can be found by making a request to the RPC port.
+For an `inspect` process running on `127.0.0.1:26657`, navigate your browser to
+`http://127.0.0.1:26657/` to retrieve the list of enabled RPC endpoints.
+
+Additional information on the Tendermint RPC endpoints can be found in the [rpc documentation](https://docs.tendermint.com/master/rpc).
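+
+As a quick programmatic usage example (assuming an `inspect` process
+listening on the default `127.0.0.1:26657` address), the endpoint listing
+can also be fetched from code rather than a browser:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io"
+	"log"
+	"net/http"
+)
+
+func main() {
+	// The root path of a running `inspect` process returns the list
+	// of enabled RPC endpoints.
+	resp, err := http.Get("http://127.0.0.1:26657/")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer resp.Body.Close()
+
+	body, err := io.ReadAll(resp.Body)
+	if err != nil {
+		log.Fatal(err)
+	}
+	fmt.Println(string(body))
+}
+```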
diff --git a/docs/tools/debugging/pro.md b/docs/tools/debugging/pro.md
index 3342deb49..a248aa130 100644
--- a/docs/tools/debugging/pro.md
+++ b/docs/tools/debugging/pro.md
@@ -64,13 +64,42 @@ It won’t kill the node, but it will gather all of the above data and package i
 
 At this point, depending on how severe the degradation is, you may want to
 restart the process.
+
+## Tendermint Inspect
+
+What if the Tendermint node will not start up due to inconsistent consensus state?
+
+When a node running the Tendermint consensus engine detects an inconsistent state,
+it will crash the entire Tendermint process.
+The Tendermint consensus engine cannot be run in this inconsistent state, and so
+the node will fail to start up as a result.
+The Tendermint RPC server can provide valuable information for debugging in this situation.
+The Tendermint `inspect` command will run a subset of the Tendermint RPC server
+that is useful for debugging inconsistent state.
+
+### Running inspect
+
+Start up the `inspect` tool on the machine where Tendermint crashed using:
+
+```bash
+tendermint inspect --home=
+```
+
+`inspect` will use the data directory specified in your Tendermint configuration file.
+`inspect` will also run the RPC server at the address specified in your Tendermint configuration file.
+
+### Using inspect
+
+With the `inspect` server running, you can access RPC endpoints that are critically
+important for debugging.
+Calling the `/status`, `/consensus_state`, and `/dump_consensus_state` RPC endpoints
+will return useful information about the Tendermint consensus state.
+
 ## Outro
 
-We’re hoping that the `tendermint debug` subcommand will become de facto the first response to any accidents.
+We’re hoping that these Tendermint tools will become the de facto first response to any incident.
 
-Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` yet?
+Let us know what your experience has been so far! Have you had a chance to try `tendermint debug` or `tendermint inspect` yet?
 
-Join our chat, where we discuss the current issues and future improvements.
+Join our [discord chat](https://discord.gg/cosmosnetwork), where we discuss current issues and future improvements.
 
 —
diff --git a/docs/networks/docker-compose.md b/docs/tools/docker-compose.md
similarity index 100%
rename from docs/networks/docker-compose.md
rename to docs/tools/docker-compose.md
diff --git a/docs/networks/terraform-and-ansible.md b/docs/tools/terraform-and-ansible.md
similarity index 100%
rename from docs/networks/terraform-and-ansible.md
rename to docs/tools/terraform-and-ansible.md
diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md
index 81325706b..d31b8d71e 100644
--- a/docs/tutorials/go-built-in.md
+++ b/docs/tutorials/go-built-in.md
@@ -23,6 +23,8 @@ yourself with the syntax.
 By following along with this guide, you'll create a Tendermint Core project
 called kvstore, a (very) simple distributed BFT key-value store.
 
+> Note: please use a released version of Tendermint with this guide; the guide works with the latest released version. Please do not use master.
+
 ## Built-in app vs external app
 
 Running your application inside the same process as Tendermint Core will give
@@ -50,10 +52,13 @@ We'll start by creating a new Go project.
 
 ```bash
 mkdir kvstore
 cd kvstore
+go mod init github.com//
 ```
 
 Inside the example directory create a `main.go` file with the following content:
 
+> Note: there is no need to clone or fork Tendermint in this tutorial.
+ ```go package main @@ -388,7 +393,6 @@ func main() { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c - os.Exit(0) } func newTendermint(app abci.Application, configFile string) (*nm.Node, error) { @@ -431,7 +435,7 @@ func newTendermint(app abci.Application, configFile string) (*nm.Node, error) { config, pv, nodeKey, - proxy.NewLocalClientCreator(app), + abcicli.NewLocalClientCreator(app), nm.DefaultGenesisDocProviderFunc(config), nm.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), @@ -483,7 +487,7 @@ node, err := nm.NewNode( config, pv, nodeKey, - proxy.NewLocalClientCreator(app), + abcicli.NewLocalClientCreator(app), nm.DefaultGenesisDocProviderFunc(config), nm.DefaultDBProvider, nm.DefaultMetricsProvider(config.Instrumentation), @@ -496,7 +500,7 @@ if err != nil { `NewNode` requires a few things including a configuration file, a private validator, a node key and a few others in order to construct the full node. -Note we use `proxy.NewLocalClientCreator` here to create a local client instead +Note we use `abcicli.NewLocalClientCreator` here to create a local client instead of one communicating through a socket or gRPC. [viper](https://github.com/spf13/viper) is being used for reading the config, @@ -564,7 +568,6 @@ defer func() { c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) <-c -os.Exit(0) ``` ## 1.5 Getting Up and Running diff --git a/docs/tutorials/readme.md b/docs/tutorials/readme.md index a60fba349..0216df800 100644 --- a/docs/tutorials/readme.md +++ b/docs/tutorials/readme.md @@ -4,4 +4,4 @@ parent: order: 2 --- -# Guides +# Tutorials diff --git a/docs/versions b/docs/versions index 7ae4a265a..c88a614bd 100644 --- a/docs/versions +++ b/docs/versions @@ -1,4 +1,4 @@ +master master v0.32.x v0.32 v0.33.x v0.33 v0.34.x v0.34 -master master diff --git a/go.mod b/go.mod index b6e0fea38..cfd8e7fe7 100644 --- a/go.mod +++ b/go.mod @@ -3,40 +3,40 @@ module github.com/tendermint/tendermint go 1.16 require ( - github.com/BurntSushi/toml v0.3.1 - github.com/Masterminds/squirrel v1.5.0 - github.com/Workiva/go-datastructures v1.0.53 - github.com/adlio/schema v1.1.13 + github.com/BurntSushi/toml v0.4.1 + github.com/adlio/schema v1.1.14 github.com/btcsuite/btcd v0.22.0-beta github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce github.com/fortytw2/leaktest v1.3.0 - github.com/go-kit/kit v0.11.0 + github.com/go-kit/kit v0.12.0 github.com/gogo/protobuf v1.3.2 github.com/golang/protobuf v1.5.2 - github.com/golangci/golangci-lint v1.41.1 + github.com/golangci/golangci-lint v1.42.1 github.com/google/orderedcode v0.0.1 github.com/google/uuid v1.3.0 github.com/gorilla/websocket v1.4.2 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/lib/pq v1.10.2 + github.com/lib/pq v1.10.3 github.com/libp2p/go-buffer-pool v0.0.2 - github.com/minio/highwayhash v1.0.2 + github.com/mroth/weightedrand v0.4.1 github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b github.com/ory/dockertest v3.3.5+incompatible github.com/prometheus/client_golang v1.11.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.8.0 - github.com/rs/zerolog v1.23.0 + github.com/rs/zerolog v1.25.0 github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa github.com/spf13/cobra v1.2.1 - github.com/spf13/viper v1.8.1 + github.com/spf13/viper v1.9.0 
github.com/stretchr/testify v1.7.0 github.com/tendermint/tm-db v0.6.4 - github.com/vektra/mockery/v2 v2.9.0 - golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b - golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 - google.golang.org/grpc v1.39.0 + github.com/vektra/mockery/v2 v2.9.4 + golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 + golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + google.golang.org/grpc v1.41.0 gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect + pgregory.net/rapid v0.4.7 ) diff --git a/go.sum b/go.sum index 55e08e088..e767033e5 100644 --- a/go.sum +++ b/go.sum @@ -1,5 +1,6 @@ 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a h1:wFEQiK85fRsEVF0CRrPAos5LoAryUsIX1kPW/WrIqFw= 4d63.com/gochecknoglobals v0.0.0-20201008074935-acfc0b28355a/go.mod h1:wfdC5ZjKSPr7CybKEcgJhUOgeAQW1+7WcyK8OvUilfo= +bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM= bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -22,6 +23,11 @@ cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmW cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= +cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= +cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= +cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= +cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= +cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= @@ -31,6 +37,7 @@ cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM7 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= +cloud.google.com/go/firestore v1.6.0/go.mod h1:afJwI0vaXwAG54kI7A//lP/lSPDkQORQuMkv56TxEPU= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= @@ -44,15 +51,21 @@ cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RX cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 
h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= -github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/Antonboom/errname v0.1.4 h1:lGSlI42Gm4bI1e+IITtXJXvxFM8N7naWimVFKcb0McY= +github.com/Antonboom/errname v0.1.4/go.mod h1:jRXo3m0E0EuCnK3wbsSVH3X55Z4iTDLl6ZfCxwFj4TM= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw= +github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM= github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs= +github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= +github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= @@ -60,10 +73,8 @@ github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/squirrel v1.5.0 h1:JukIZisrUXadA9pl3rMkjhiamxiB0cXiu+HGp/Y8cY8= -github.com/Masterminds/squirrel v1.5.0/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10= -github.com/Microsoft/go-winio v0.4.14 h1:+hMXMk01us9KgxGb7ftKQt2Xpf5hH/yky+TDA+qxleU= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= +github.com/Microsoft/go-winio v0.5.0 h1:Elr9Wn+sGKPlkaBvwu4mTrxtmOp3F3yV9qhaHbXGjwU= +github.com/Microsoft/go-winio v0.5.0/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEVMRuU21PR1EtLVZJmdB18Gu3Rw= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -72,15 +83,14 @@ github.com/OpenPeeDeeP/depguard v1.0.1 h1:VlW4R6jmBIv3/u1JNlawEvJMM4J+dPORPaZasQ github.com/OpenPeeDeeP/depguard v1.0.1/go.mod h1:xsIw86fROiiwelg+jB2uM9PiKihMMmUx/1V+TNhjQvM= github.com/Shopify/sarama v1.19.0/go.mod 
h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/StackExchange/wmi v1.2.1/go.mod h1:rcmrprowKIVzvc+NUiLncP2uuArMWLCbu9SBzvHz7e8= github.com/VividCortex/gohistogram v1.0.0 h1:6+hBz+qvs0JOrrNhhmR7lFxo5sINxBCGXrdtl/UvroE= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/Workiva/go-datastructures v1.0.53 h1:J6Y/52yX10Xc5JjXmGtWoSSxs3mZnGSaq37xZZh7Yig= -github.com/Workiva/go-datastructures v1.0.53/go.mod h1:1yZL+zfsztete+ePzZz/Zb1/t5BnDuE2Ya2MMGhzP6A= -github.com/adlio/schema v1.1.13 h1:LeNMVg5Z1FX+Qgz8tJUijBLRdcpbFUElz+d1489On98= -github.com/adlio/schema v1.1.13/go.mod h1:L5Z7tw+7lRK1Fnpi/LT/ooCP1elkXn0krMWBQHUhEDE= +github.com/adlio/schema v1.1.14 h1:lIjyp5/2wSuEOmeQGNPpaRsVGZRqz9A/B+PaMtEotaU= +github.com/adlio/schema v1.1.14/go.mod h1:hQveFEMiDlG/M9yz9RAajnH5DzT6nAfqOG9YkEQU2pg= github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= +github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= @@ -95,6 +105,7 @@ github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/ashanbrown/forbidigo v1.2.0 h1:RMlEFupPCxQ1IogYOQUnIQwGEUGK8g5vAPMRyJoSxbc= @@ -104,15 +115,17 @@ github.com/ashanbrown/makezero v0.0.0-20210520155254-b6261585ddde/go.mod h1:oG9D github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.36.30/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.68/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go-v2 v1.7.0/go.mod h1:tb9wi5s61kTDA5qCkcDbt3KRVV74GGslQkl/DRdX/P4= -github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.5.0/go.mod h1:acH3+MQoiMzozT/ivU+DbRg7Ooo2298RdRaWcOv+4vM= -github.com/aws/smithy-go v1.5.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= +github.com/aws/aws-sdk-go-v2 v1.9.1/go.mod h1:cK/D0BBs0b/oWPIcX/Z/obahJK1TT7IPVjy53i/mX/4= 
+github.com/aws/aws-sdk-go-v2/service/cloudwatch v1.8.1/go.mod h1:CM+19rL1+4dFWnOQKwDc7H1KwXTz+h61oUSHyhV0b3o= +github.com/aws/smithy-go v1.8.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= +github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= +github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= github.com/bkielbasa/cyclop v1.2.0 h1:7Jmnh0yL2DjKfw28p86YTd/B4lRGcNuu12sKE35sM7A= @@ -133,31 +146,38 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= -github.com/casbin/casbin/v2 v2.31.6/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= +github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= +github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charithe/durationcheck v0.0.8 h1:cnZrThioNW9gSV5JsRIXmkyHUbcDH7Y9hkzFDVc9/j0= github.com/charithe/durationcheck v0.0.8/go.mod h1:SSbRIBVfMjCi/kEB6K65XEA83D6prSM8ap1UCpNKtgg= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af h1:spmv8nSH9h5oCQf40jt/ufBCt9j0/58u4G+rkeMqXGI= github.com/chavacava/garif v0.0.0-20210405164556-e8a0a408d6af/go.mod h1:Qjyv4H3//PWVzTeCezG2b9IRn6myJxJSr4TD/xo6ojU= +github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod 
h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= +github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= +github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= +github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= +github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6 h1:NmTXa/uVnDyp0TY5MKi197+3HWcnYWfnHGyaFthlnGw= -github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= +github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ= +github.com/containerd/continuity v0.2.0 h1:j/9Wnn+hrEWjLvHuIxUU1YI5JjEjVlT2AA68cse9rwY= +github.com/containerd/continuity v0.2.0/go.mod h1:wCYX+dRqZdImhGucXOqTQn05AhX6EUDaGEMUzTFFpLg= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= @@ -175,8 +195,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:ma github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/daixiang0/gci v0.2.8 h1:1mrIGMBQsBu0P7j7m1M8Lb+ZeZxsZL+jyGX4YoMJJpg= -github.com/daixiang0/gci v0.2.8/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= +github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= +github.com/daixiang0/gci v0.2.9 h1:iwJvwQpBZmMg31w+QQ6jsyZ54KEATn6/nfARbBNW294= +github.com/daixiang0/gci v0.2.9/go.mod h1:+4dZ7TISfSmqfAGv59ePaHfNzgGtIkHAhhdKggP1JAc= github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -211,6 +232,7 @@ github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5y github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane 
v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= +github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/esimonov/ifshort v1.0.2 h1:K5s1W2fGfkoWXsFlxBNqT6J0ZCncPaKrGM5qe0bni68= @@ -230,13 +252,16 @@ github.com/fatih/color v1.12.0 h1:mRhaKNwANqRgUBGKmnI5ZxEk7QXmjQeCcuYFMX2bfcc= github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM= github.com/fatih/structtag v1.2.0 h1:/OdNE99OxoI/PqaW/SuSK9uxxT3f/tcSZgon/ssNSx4= github.com/fatih/structtag v1.2.0/go.mod h1:mBJUNpUnHmRKrKlQQlmCrh5PuhftFbNv8Ys4/aAZl94= +github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k= github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= +github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= +github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= github.com/fzipp/gocyclo v0.3.1 h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc= github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E= @@ -250,13 +275,15 @@ github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.11.0 h1:IGmIEl7aHTYh6E2HlT+ptILBotjo4xl8PMDl852etiI= -github.com/go-kit/kit v0.11.0/go.mod h1:73/6Ixaufkvb5Osvkls8C79vuQ49Ba1rUEUYNSf+FUw= +github.com/go-kit/kit v0.12.0 h1:e4o3o3IsBfAKQh5Qbbiqyfu97Ku7jrO/JbohvztANh4= +github.com/go-kit/kit v0.12.0/go.mod h1:lHd+EkCZPIwYItmGDDRdhinkzX2A1sj+M9biaEaizzs= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt 
v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
-github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
+github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
+github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
 github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
 github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
 github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
@@ -288,20 +315,23 @@ github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL
 github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
-github.com/gofrs/flock v0.8.0 h1:MSdYClljsF3PbENUUEx85nkWfJSGfzYI9yEBZOJz6CY=
-github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
 github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
 github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
 github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
 github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
 github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -310,6 +340,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
 github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
 github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
 github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
+github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
 github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
 github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -330,8 +361,9 @@ github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx
 github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
 github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
 github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
-github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
@@ -340,8 +372,8 @@ github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613 h1:9kfjN3AdxcbsZB
 github.com/golangci/go-misc v0.0.0-20180628070357-927a3d87b613/go.mod h1:SyvUF2NxV+sN8upjjeVYr5W7tyxaT1JVtvhKhOn2ii8=
 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.41.1 h1:KH28pTSqRu6DTXIAANl1sPXNCmqg4VEH21z6G9Wj4SM=
-github.com/golangci/golangci-lint v1.41.1/go.mod h1:LPtcY3aAAU8wydHrKpnanx9Og8K/cblZSyGmI5CJZUk=
+github.com/golangci/golangci-lint v1.42.1 h1:nC4WyrbdnNdohDVUoNKjy/4N4FTM1gCFaVeXecy6vzM=
+github.com/golangci/golangci-lint v1.42.1/go.mod h1:MuInrVlgg2jq4do6XI1jbkErbVHVbwdrLLtGv6p2wPI=
 github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
 github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
 github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
@@ -367,12 +399,14 @@ github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk=
 github.com/google/orderedcode v0.0.1 h1:UzfcAexk9Vhv8+9pNOgRu41f16lHq725vPwnSeiG/Us=
 github.com/google/orderedcode v0.0.1/go.mod h1:iVyU4/qPKHY5h/wSd6rZZCDcLJNxiWO6dvsYES2Sb20=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -387,18 +421,22 @@ github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLe
 github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw=
 github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
 github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
 github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
+github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
 github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
-github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU=
 github.com/gordonklaus/ineffassign v0.0.0-20210225214923-2e10b2664254 h1:Nb2aRlC404yz7gQIfRZxX9/MLvQiqXyiBTJtgAy6yrI=
@@ -439,19 +477,24 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
 github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
 github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
-github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk=
+github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
 github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
-github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
+github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
 github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
+github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48=
 github.com/hashicorp/go-hclog v0.12.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
+github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ=
 github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
+github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
 github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
 github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
 github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
+github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8=
 github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
@@ -474,7 +517,7 @@ github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKEN
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
 github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
-github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
+github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
@@ -503,17 +546,19 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX
 github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
 github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
 github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
 github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
+github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
 github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
 github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
-github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
 github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
 github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk=
 github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
 github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
 github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d h1:XeSMXURZPtUffuWAaq90o6kLgZdgu+QA8wk4MPC8ikI=
 github.com/julz/importas v0.0.0-20210419104244-841f0c0fe66d/go.mod h1:oSFU2R4XK/P7kNBrnL/FEQlDGN1/6WoxXEjSSXO0DV0=
+github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/k0kubun/colorstring v0.0.0-20150214042306-9440f1994b88/go.mod h1:3w7q1U84EfirKl04SVQ/s7nPm1ZPhiXd34z40TNz36k=
 github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q=
 github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -525,7 +570,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
 github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
 github.com/klauspost/compress v1.10.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.0/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.11.12/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -533,6 +579,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
 github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
@@ -544,22 +591,18 @@ github.com/kunwardeep/paralleltest v1.0.2/go.mod h1:ZPqNm1fVHPllh5LPVujzbVz1JN2G
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/kyoh86/exportloopref v0.1.8 h1:5Ry/at+eFdkX9Vsdw3qU4YkvGtzuVfzT4X7S77LoN/M=
 github.com/kyoh86/exportloopref v0.1.8/go.mod h1:1tUcJeiioIs7VWe5gcOObrux3lb66+sBqGZrRkMwPgg=
-github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
-github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
-github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
-github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
-github.com/ldez/gomoddirectives v0.2.1 h1:9pAcW9KRZW7HQjFwbozNvFMcNVwdCBufU7os5QUwLIY=
-github.com/ldez/gomoddirectives v0.2.1/go.mod h1:sGicqkRgBOg//JfpXwkB9Hj0X5RyJ7mlACM5B9f6Me4=
+github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5OPFVg=
+github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
 github.com/ldez/tagliatelle v0.2.0 h1:693V8Bf1NdShJ8eu/s84QySA0J2VWBanVBa2WwXD/Wk=
 github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
 github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
 github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.9.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
-github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8=
 github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.3 h1:v9QZf2Sn6AmjXtQeFpdoq/eaNtYP6IN+7lcrygsIAtg=
+github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs=
 github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM=
 github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4=
@@ -582,8 +625,9 @@ github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
 github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
 github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
 github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
+github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
+github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -597,15 +641,15 @@ github.com/mbilski/exhaustivestruct v1.2.0 h1:wCBmUnSYufAHO6J4AVWY6ff+oxWxsVFrwg
 github.com/mbilski/exhaustivestruct v1.2.0/go.mod h1:OeTBVxQWoEmB2J2JCHmXWPJ0aksxSUOUy+nvtVEfzXc=
 github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81 h1:QASJXOGm2RZ5Ardbc86qNFvby9AqkLDibfChMtAg5QM=
 github.com/mgechev/dots v0.0.0-20190921121421-c36f7dcfbb81/go.mod h1:KQ7+USdGKfpPjXk4Ga+5XxQM4Lm4e3gAogrreFAYpOg=
-github.com/mgechev/revive v1.0.7 h1:5kEWTY/W5a0Eiqnkn2BAWsRZpxbs1ft15PsyNC7Rml8=
-github.com/mgechev/revive v1.0.7/go.mod h1:vuE5ox/4L/HDd63MCcCk3H6wTLQ6XXezRphJ8cJJOxY=
+github.com/mgechev/revive v1.1.1 h1:mkXNHP14Y6tfq+ocnQaiKEtgJDM41yaoyQq4qn6TD/4=
+github.com/mgechev/revive v1.1.1/go.mod h1:PKqk4L74K6wVNwY2b6fr+9Qqr/3hIsHVfZCJdbvozrY=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
+github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
 github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
-github.com/minio/highwayhash v1.0.2 h1:Aak5U0nElisjDCfPSG79Tgzkn2gl66NxOMspRrKnA/g=
 github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
@@ -619,19 +663,25 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.4.1 h1:CpVNEelQCZBooIPDn+AR3NpivK/TIKU8bDxdASFVQag=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
+github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
 github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
+github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
+github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8=
 github.com/moricho/tparallel v0.2.1 h1:95FytivzT6rYzdJLdtfn6m1bfFJylOJK41+lgv/EHf4=
 github.com/moricho/tparallel v0.2.1/go.mod h1:fXEIZxG2vdfl0ZF8b42f5a78EhjjD5mX8qUplsoSU4k=
 github.com/mozilla/scribe v0.0.0-20180711195314-fb71baf557c1/go.mod h1:FIczTrinKo8VaLxe6PWTPEXRXDIHz2QAwiaBaP5/4a8=
-github.com/mozilla/tls-observatory v0.0.0-20210209181001-cf43108d6880/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
+github.com/mozilla/tls-observatory v0.0.0-20210609171429-7bc42856d2e5/go.mod h1:FUqVoUPHSEdDR0MnFM3Dh8AU0pZHLXUD127SAJGER/s=
+github.com/mroth/weightedrand v0.4.1 h1:rHcbUBopmi/3x4nnrvwGJBhX9d0vk+KgoLUZeDP6YyI=
+github.com/mroth/weightedrand v0.4.1/go.mod h1:3p2SIcC8al1YMzGhAIoXD+r9olo/g/cdJgAD905gyNE=
+github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ=
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo=
@@ -639,9 +689,9 @@ github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0
 github.com/nakabonne/nestif v0.3.0 h1:+yOViDGhg8ygGrmII72nV9B/zGxY188TYpfolntsaPw=
 github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c=
 github.com/nats-io/jwt v1.2.2/go.mod h1:/xX356yQA6LuXI9xWW7mZNpxgF2mBmGecH+Fj34sP5Q=
-github.com/nats-io/jwt/v2 v2.0.2/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
-github.com/nats-io/nats-server/v2 v2.2.6/go.mod h1:sEnFaxqe09cDmfMgACxZbziXnhQFhwk+aKkZjBBRYrI=
-github.com/nats-io/nats.go v1.11.0/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
+github.com/nats-io/jwt/v2 v2.0.3/go.mod h1:VRP+deawSXyhNjXmxPCHskrR6Mq50BqpEI5SEcNiGlY=
+github.com/nats-io/nats-server/v2 v2.5.0/go.mod h1:Kj86UtrXAL6LwYRA6H4RqzkHhK0Vcv2ZnKD5WbQ1t3g=
+github.com/nats-io/nats.go v1.12.1/go.mod h1:BPko4oXsySz4aSWeFgOHLZs3G4Jq4ZAyE6/zMCxRT6w=
 github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s=
 github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
@@ -649,8 +699,8 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 h1:4kuARK6Y6Fx
 github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354/go.mod h1:KSVJerMDfblTH7p5MZaTt+8zaT2iEk3AkVb9PQdZuE8=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nishanths/exhaustive v0.1.0 h1:kVlMw8h2LHPMGUVqUj6230oQjjTMFjwcZrnkhXzFfl8=
-github.com/nishanths/exhaustive v0.1.0/go.mod h1:S1j9110vxV1ECdCudXRkeMnFQ/DQk9ajLT0Uf2MYZQQ=
+github.com/nishanths/exhaustive v0.2.3 h1:+ANTMqRNrqwInnP9aszg/0jDo+zbXa4x66U19Bx/oTk=
+github.com/nishanths/exhaustive v0.2.3/go.mod h1:bhIX678Nx8inLM9PbpvK1yv6oGtoP8BfaIeMzgBNKvc=
 github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ=
 github.com/nishanths/predeclared v0.2.1 h1:1TXtjmy4f3YCFjTxRd8zcFHOmoUir+gp0ESzjFzG2sw=
 github.com/nishanths/predeclared v0.2.1/go.mod h1:HvkGJcA3naj4lOwnFXFDkFxVtSqQMB9sbB1usJ+xjQE=
@@ -670,38 +720,42 @@ github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
 github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
-github.com/onsi/ginkgo v1.16.1 h1:foqVmeWDD6yYpK+Yz3fHyNIxFYNxswxqNFjSKe+vI54=
-github.com/onsi/ginkgo v1.16.1/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
+github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.11.0 h1:+CqWgvj0OZycCaqclBD1pxKHAU+tOkHmQIWvDHq2aug=
-github.com/onsi/gomega v1.11.0/go.mod h1:azGKhqFUon9Vuj0YmTfLSmx0FUwqXYSTl5re8lQLTUg=
+github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=
+github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
-github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ=
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y=
-github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
+github.com/opencontainers/runc v1.0.2 h1:opHZMaswlyxz1OuGpBE53Dwe4/xF7EZTY0A2L/FpCOg=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
 github.com/openzipkin/zipkin-go v0.2.5/go.mod h1:KpXfKdgRDnnhsxw4pNIH9Md5lyFqKUa4YDFlwRYAMyE=
 github.com/ory/dockertest v3.3.5+incompatible h1:iLLK6SQwIhcbrG783Dghaaa3WPzGc+4Emza6EbVUUGA=
 github.com/ory/dockertest v3.3.5+incompatible/go.mod h1:1vX4m9wsvi00u5bseYwXaSnhNrne+V0E6LAcBILJdPs=
 github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
+github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
 github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
 github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.9.3 h1:zeC5b1GviRUyKYd6OJPvBU/mcVDVoL1OhT17FCt5dSQ=
 github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
-github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
+github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhECwM=
+github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
+github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
 github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
 github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
 github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
-github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU=
 github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
 github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
 github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@@ -712,13 +766,14 @@ github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZ
 github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
 github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v0.0.0-20210510181950-ab96adb96fea h1:Sk6Xawg57ZkjXmFYD1xCHSKN6FtYM+km51MM7Lveyyc=
-github.com/polyfloyd/go-errorlint v0.0.0-20210510181950-ab96adb96fea/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw=
+github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349 h1:Kq/3kL0k033ds3tyez5lFPrfQ74fNJ+OqCclRipubwA=
+github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349/go.mod h1:wi9BfjxjF/bwiZ701TzmfKu6UKC357IOAtNr0Td0Lvw=
 github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
 github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
 github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
 github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
+github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
 github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
@@ -730,15 +785,19 @@ github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6T
 github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
 github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
 github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
+github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
-github.com/prometheus/common v0.26.0 h1:iMAkS2TDoNWnKM+Kopnx/8tnEStIfpYA0ur0xQzzhMQ=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
+github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
+github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
+github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA=
 github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q=
@@ -763,40 +822,40 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so=
 github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
 github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I=
-github.com/rs/zerolog v1.23.0 h1:UskrK+saS9P9Y789yNNulYKdARjPZuS35B8gJF2x60g=
-github.com/rs/zerolog v1.23.0/go.mod h1:6c7hFfxPOy7TacJc4Fcdi24/J0NKYGzjG8FWRI916Qo=
+github.com/rs/zerolog v1.25.0 h1:Rj7XygbUHKUlDPcVdoLyR91fJBsduXj5fRxyqIQj/II=
+github.com/rs/zerolog v1.25.0/go.mod h1:7KHcEGe0QZPOm2IE4Kpb5rTh6n1h2hIgS5OOnu1rUaI=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/ryancurrah/gomodguard v1.2.2 h1:ZJQeYHZ2kaJpojoQBaGqpsn5g7GMcePY36uUGW1umbs=
-github.com/ryancurrah/gomodguard v1.2.2/go.mod h1:tpI+C/nzvfUR3bF28b5QHpTn/jM/zlGniI++6ZlIWeE=
+github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8=
+github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg=
 github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=
 github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0KCtEdgEkHwDbigdA=
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
+github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
 github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=
 github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
 github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4=
 github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
-github.com/securego/gosec/v2 v2.8.0 h1:iHg9cVmHWf5n6/ijUJ4F10h5bKlNtvXmcWzRw0lxiKE=
-github.com/securego/gosec/v2 v2.8.0/go.mod h1:hJZ6NT5TqoY+jmOsaxAV4cXoEdrMRLVaNPnSpUCvCZs=
+github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/securego/gosec/v2 v2.8.1 h1:Tyy/nsH39TYCOkqf5HAgRE+7B5D8sHDwPdXRgFWokh8=
+github.com/securego/gosec/v2 v2.8.1/go.mod h1:pUmsq6+VyFEElJMUX+QB3p3LWNHXg1R3xh2ssVJPs8Q=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c h1:W65qqJCIOVP4jpqPQ0YvHYKwcMEMVWIzWC5iNQQfBTU=
 github.com/shazow/go-diff v0.0.0-20160112020656-b6b7b6733b8c/go.mod h1:/PevMnwAxekIXwN8qQyfc5gl2NlkB3CQlkizAbOkeBs=
-github.com/shirou/gopsutil/v3 v3.21.5/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw=
+github.com/shirou/gopsutil/v3 v3.21.7/go.mod h1:RGl11Y7XMTQPmHh8F0ayC6haKNBgH4PXMJuTAcMOlz4=
 github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
 github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
 github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
 github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
-github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
 github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
-github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
-github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa h1:YJfZp12Z3AFhSBeXOlv4BO55RMwPn2NoQeDsrdWnBtY=
 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa/go.mod h1:oJyF+mSPHbB5mVY2iO9KV3pTt/QbIkGaO8gQ2WrDbP4=
@@ -813,12 +872,12 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B
 github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY=
 github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
-github.com/spf13/cast v1.3.1 h1:nFm6S0SMdyzrzcmThSipiEubIDy8WEXKNZ0UOgiRpng=
 github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
+github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=
+github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
-github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
 github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
 github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
@@ -832,8 +891,9 @@ github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DM
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
 github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
 github.com/spf13/viper v1.7.1/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
-github.com/spf13/viper v1.8.1 h1:Kq1fyeebqsBfbjZj4EL7gj2IO0mMaiyjYUWcUsl2O44=
 github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
+github.com/spf13/viper v1.9.0 h1:yR6EXjTp0y0cLN8OZg1CRZmOBdI88UcGkhgyJhu6nZk=
+github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
 github.com/ssgreg/nlreturn/v2 v2.1.0 h1:6/s4Rc49L6Uo6RLjhWZGBpWWjfzk2yrf1nIW8m4wgVA=
 github.com/ssgreg/nlreturn/v2 v2.1.0/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
 github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
@@ -853,6 +913,7 @@ github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5Cc
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
+github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca h1:Ld/zXl5t4+D69SiV4JoN7kkfvJdOWlPpfxrzxpLMoUk=
 github.com/syndtr/goleveldb v1.0.1-0.20200815110645-5c35d600f0ca/go.mod h1:u2MKkTVTVJWe5D1rCvame8WqhBd88EuIwODJZ1VHCPM=
 github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b h1:HxLVTlqcHhFAz3nWUcuvpH7WuOMv8LQoCWmruLfFH2U=
@@ -861,22 +922,22 @@ github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzH
 github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8=
 github.com/tendermint/tm-db v0.6.4 h1:3N2jlnYQkXNQclQwd/eKV/NzlqPlfK21cpRRIx80XXQ=
 github.com/tendermint/tm-db v0.6.4/go.mod h1:dptYhIpJ2M5kUuenLr+Yyf3zQOv1SgBZcl8/BmWlMBw=
-github.com/tetafro/godot v1.4.7 h1:zBaoSY4JRVVz33y/qnODsdaKj2yAaMr91HCbqHCifVc=
-github.com/tetafro/godot v1.4.7/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
+github.com/tetafro/godot v1.4.9 h1:wsNd0RuUxISqqudFqchsSsMqsM188DoZVPBeKl87tP0=
+github.com/tetafro/godot v1.4.9/go.mod h1:LR3CJpxDVGlYOWn3ZZg1PgNZdTUvzsZWu8xaEohUpn8=
 github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94 h1:ig99OeTyDwQWhPe2iw9lwfQVF1KB3Q4fpP3X7/2VBG8=
 github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk=
-github.com/tinylib/msgp v1.1.5/go.mod h1:eQsjooMTnV42mHu917E26IogZ2930nFyBQdofk10Udg=
-github.com/tklauser/go-sysconf v0.3.4/go.mod h1:Cl2c8ZRWfHD5IrfHo9VN+FX9kCFjIOyVklgXycLB6ek=
-github.com/tklauser/numcpus v0.2.1/go.mod h1:9aU+wOc6WjUIZEwWMP62PL/41d65P+iks1gBkr4QyP8=
+github.com/tklauser/go-sysconf v0.3.7/go.mod h1:JZIdXh4RmBvZDBZ41ld2bGxRV3n4daiiqA3skYhAoQ4=
+github.com/tklauser/numcpus v0.2.3/go.mod h1:vpEPS/JC+oZGGQ/My/vJnNsvMDQL6PwOqt8dsCw5j+E=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
-github.com/tomarrell/wrapcheck/v2 v2.1.0 h1:LTzwrYlgBUwi9JldazhbJN84fN9nS2UNGrZIo2syqxE=
-github.com/tomarrell/wrapcheck/v2 v2.1.0/go.mod h1:crK5eI4RGSUrb9duDTQ5GqcukbKZvi85vX6nbhsBAeI=
+github.com/tomarrell/wrapcheck/v2 v2.3.0 h1:i3DNjtyyL1xwaBQOsPPk8LAcpayWfQv2rxNi9b/eEx4=
+github.com/tomarrell/wrapcheck/v2 v2.3.0/go.mod h1:aF5rnkdtqNWP/gC7vPUO5pKsB0Oac2FDTQP4F+dpZMU=
 github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
 github.com/tommy-muehle/go-mnd/v2 v2.4.0 h1:1t0f8Uiaq+fqKteUR4N9Umr6E99R+lDnLnq7PwX2PPE=
 github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
-github.com/ttacon/chalk v0.0.0-20160626202418-22c06c80ed31/go.mod h1:onvgF043R+lC5RZ8IT9rBXDaEDnpnw/Cl+HFiw+v/7Q=
+github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
 github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
 github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
 github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
@@ -887,15 +948,17 @@ github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFO
 github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
 github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
 github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0=
-github.com/uudashr/gocognit v1.0.1 h1:MoG2fZ0b/Eo7NXoIwCVFLG5JED3qgQz5/NEE+rOsjPs=
-github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
+github.com/uudashr/gocognit v1.0.5 h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=
+github.com/uudashr/gocognit v1.0.5/go.mod h1:wgYz0mitoKOTysqxTDMOUXg+Jb5SvtihkfmugIZYpEA=
 github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
 github.com/valyala/fasthttp v1.16.0/go.mod h1:YOKImeEosDdBPnxc0gy7INqi3m1zK6A+xl6TwOBhHCA=
 github.com/valyala/quicktemplate v1.6.3/go.mod h1:fwPzK2fHuYEODzJ9pkw0ipCPNHZ2tD5KW4lOuSdPKzY=
 github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio=
-github.com/vektra/mockery/v2 v2.9.0 h1:+3FhCL3EviR779mTzXwUuhPNnqFUA7sDnt9OFkXaFd4=
-github.com/vektra/mockery/v2 v2.9.0/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90=
+github.com/vektra/mockery/v2 v2.9.4 h1:ZjpYWY+YLkDIKrKtFnYPxJax10lktcUapWZtOSg4g7g=
+github.com/vektra/mockery/v2 v2.9.4/go.mod h1:2gU4Cf/f8YyC8oEaSXfCnZBMxMjMl/Ko205rlP0fO90=
 github.com/viki-org/dnscache v0.0.0-20130720023526-c70c1f23c5d8/go.mod h1:dniwbG03GafCjFohMDmz6Zc6oCuiqgH6tGNyXTkHzXE=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
 github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
 github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
 github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@@ -933,14 +996,18 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
+go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
+go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI=
 golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -957,9 +1024,16 @@ golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
 golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
-golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b h1:wSOdpTq0/eI46Ez/LkDwIsAKA71YP2SRKBODiRWM0as=
 golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
+golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
+golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210915214749-c084706c2272 h1:3erb+vDS8lU1sxfDHF4/hhWyaXnhIaO+7RgL4fDZORA=
+golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
+golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
 golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
@@ -970,6 +1044,7 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
 golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
+golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
 golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1040,13 +1115,19 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
 golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
 golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
+golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
-golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4 h1:4nGaVu0QrbjT/AK2PRLuQfQuh6DJve+pELhqTdAj3x0=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b h1:SXy8Ld8oKlcogOvUAh0J5Pm5RKzgYBMMxLxt6n5XW50=
+golang.org/x/net v0.0.0-20211005001312-d4b1ae081e3b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1059,6 +1140,10 @@ golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ
 golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1070,6 +1155,7 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1088,20 +1174,24 @@ golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191115151921-52ab43148777/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1126,6 +1216,7 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1133,16 +1224,27 @@ golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20210217105451-b926d437f341/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40 h1:JWgyZ1qgdTaF3N3oxC+MdTV7qvEEgHo3otj+HB5CM7Q=
+golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef h1:fPxZ3Umkct3LZ8gK9nbk+DWDJ9fstZa2grBn+lWVKPs=
+golang.org/x/sys v0.0.0-20211004093028-2c5d950f24ef/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1152,19 +1254,23 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.5 h1:i6eZZ+zk0SOf0xgBpEpPD18qWcJda6q1sxt3S0kzyUQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190110163146-51295c7ec13a/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
 golang.org/x/tools v0.0.0-20190307163923-6a08e3108db3/go.mod h1:25r3+/G6/xytQM8iWZKq3Hn0kr0rgFKPUNVEL/dr3z4=
 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@@ -1239,8 +1345,6 @@ golang.org/x/tools v0.0.0-20200831203904-5a2aa26beb65/go.mod h1:Cj7w3i3Rnn0Xh82u
 golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
 golang.org/x/tools v0.0.0-20201001104356-43ebab892c4c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
 golang.org/x/tools v0.0.0-20201002184944-ecd9fd270d5d/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
-golang.org/x/tools v0.0.0-20201011145850-ed2f50202694/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
-golang.org/x/tools v0.0.0-20201022035929-9cf592e881e9/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201023174141-c8cfbd0f21e6/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201028025901-8cd080b735b3/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
@@ -1255,14 +1359,21 @@ golang.org/x/tools v0.0.0-20210104081019-d8d6ddbec6ee/go.mod h1:emZCQorbCU4vsT4f
 golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
+golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.3 h1:L69ShwSZEyCsLKoAxDKeMvLDZkumEe8gXUZAjab0tX8=
 golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.5 h1:ouewzE6p+/VEB31YYnTbEJdi8pFqKp4P4n85vwo3DHA=
+golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
 golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
+gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
+gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod
h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw= +gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1286,6 +1397,12 @@ google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjR google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= +google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= +google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= +google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= +google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= +google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= +google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -1340,8 +1457,20 @@ google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c h1:wtujag7C+4D6KMoulW9YauvK2lgdvCMS260jsqqBXr0= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= +google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= +google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= +google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= +google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= +google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= 
+google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4 h1:ysnBoUyeL/H6RCvNRhWHjKoDEmguI+mPU+qHgK8qv/w= +google.golang.org/genproto v0.0.0-20210917145530-b395a37504d4/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -1366,9 +1495,15 @@ google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA5 google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0 h1:Klz8I9kdtkIN6EpHHUOMLCYhTn/2WAe5a0s1hcBkdTI= google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= +google.golang.org/grpc v1.41.0 h1:f+PlOh7QV4iIJkPrx5NQ7qaNGFQ3OTse67yaDHfju4E= +google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1398,8 +1533,9 @@ gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.62.0 h1:duBzk771uxoUuOlyRLkHsygud9+5lrlGjdFBb4mSKDU= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/ini.v1 v1.63.2 h1:tGK/CyBg7SMzb60vP1M03vNZ3VDu3wGQJwn7Sxi9r3c= +gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= @@ -1427,8 +1563,8 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= -honnef.co/go/tools v0.2.0 h1:ws8AfbgTX3oIczLPNPCu5166oBg9ST2vNs0rcht+mDE= -honnef.co/go/tools v0.2.0/go.mod 
h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= +honnef.co/go/tools v0.2.1 h1:/EPr//+UMMXwMTkXvCCoaJDq8cpjMO80Ou+L4PDo2mY= +honnef.co/go/tools v0.2.1/go.mod h1:lPVVZ2BS5TfnjLyizF7o7hv7j9/L+8cZY2hLyjP9cGY= mvdan.cc/gofumpt v0.1.1 h1:bi/1aS/5W00E2ny5q65w9SnKpWEF/UIOqDYBILpo9rA= mvdan.cc/gofumpt v0.1.1/go.mod h1:yXG1r1WqZVKWbVRtBWKWX9+CxGYfA51nSomhM0woR48= mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed h1:WX1yoOaKQfddO/mLzdV4wptyWgoH/6hwLs7QHTixo0I= @@ -1437,7 +1573,10 @@ mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b h1:DxJ5nJdkhDlLok9K6qO+5290kphD mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7 h1:HT3e4Krq+IE44tiN36RvVEb6tvqeIdtsVSsxmNPqlFU= mvdan.cc/unparam v0.0.0-20210104141923-aac4ce9116a7/go.mod h1:hBpJkZE8H/sb+VRFvw2+rBpHNsTBcvSpk61hr8mzXZE= +pgregory.net/rapid v0.4.7 h1:MTNRktPuv5FNqOO151TM9mDTa+XHcX6ypYeISDVD14g= +pgregory.net/rapid v0.4.7/go.mod h1:UYpPVyjFHzYBGHIxLFoupi8vwk6rXNzRY9OMvVxFIOU= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= diff --git a/internal/blocksync/doc.go b/internal/blocksync/doc.go index 3111130e4..5f84b1261 100644 --- a/internal/blocksync/doc.go +++ b/internal/blocksync/doc.go @@ -13,14 +13,9 @@ will no longer blocksync and thus no longer run the blocksync process. Note, the blocksync reactor Service gossips entire block and relevant data such that each receiving peer may construct the entire view of the blocksync state. -There are currently two versions of the blocksync reactor Service: - -- v0: The initial implementation that is battle-tested, but whose test coverage - is lacking and is not formally verifiable. -- v2: The latest implementation that has much higher test coverage and is formally - verified. However, the current implementation of v2 is not as battle-tested and - is known to have various bugs that could make it unreliable in production - environments. +There is currently only one version of the blocksync reactor Service +that is battle-tested, but whose test coverage is lacking and is not +formally verified. The v0 blocksync reactor Service has one p2p channel, BlockchainChannel. 
This channel is responsible for handling messages that both request blocks and respond diff --git a/internal/blocksync/v0/pool.go b/internal/blocksync/pool.go similarity index 99% rename from internal/blocksync/v0/pool.go rename to internal/blocksync/pool.go index b3704f333..d10a32b0d 100644 --- a/internal/blocksync/v0/pool.go +++ b/internal/blocksync/pool.go @@ -1,4 +1,4 @@ -package v0 +package blocksync import ( "errors" diff --git a/internal/blocksync/v0/pool_test.go b/internal/blocksync/pool_test.go similarity index 99% rename from internal/blocksync/v0/pool_test.go rename to internal/blocksync/pool_test.go index 67617d2b7..cbe19acbe 100644 --- a/internal/blocksync/v0/pool_test.go +++ b/internal/blocksync/pool_test.go @@ -1,4 +1,4 @@ -package v0 +package blocksync import ( "fmt" diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/reactor.go similarity index 77% rename from internal/blocksync/v0/reactor.go rename to internal/blocksync/reactor.go index c43959808..43c3e83cd 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/reactor.go @@ -1,4 +1,4 @@ -package v0 +package blocksync import ( "fmt" @@ -6,46 +6,22 @@ import ( "sync" "time" - bc "github.com/tendermint/tendermint/internal/blocksync" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - tmSync "github.com/tendermint/tendermint/libs/sync" + tmsync "github.com/tendermint/tendermint/libs/sync" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) -var ( - _ service.Service = (*Reactor)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - BlockchainChannel: { - MsgType: new(bcproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(BlockchainChannel), - Priority: 5, - SendQueueCapacity: 1000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: bc.MaxMsgSize, - MaxSendBytes: 100, - }, - }, - } -) +var _ service.Service = (*Reactor)(nil) const ( - // BlockchainChannel is a channel for blocks and status updates - BlockchainChannel = p2p.ChannelID(0x40) + // BlockSyncChannel is a channel for blocks and status updates + BlockSyncChannel = p2p.ChannelID(0x40) trySyncIntervalMS = 10 @@ -59,8 +35,19 @@ const ( syncTimeout = 60 * time.Second ) +func GetChannelDescriptor() *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: BlockSyncChannel, + MessageType: new(bcproto.Message), + Priority: 5, + SendQueueCapacity: 1000, + RecvBufferCapacity: 1024, + RecvMessageCapacity: MaxMsgSize, + } +} + type consensusReactor interface { - // For when we switch from blockchain reactor and block sync to the consensus + // For when we switch from block sync reactor to the consensus // machine. 
SwitchToConsensus(state sm.State, skipWAL bool) } @@ -85,19 +72,19 @@ type Reactor struct { store *store.BlockStore pool *BlockPool consReactor consensusReactor - blockSync *tmSync.AtomicBool + blockSync *tmsync.AtomicBool - blockchainCh *p2p.Channel - // blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope - // messages that the reactor will consume in processBlockchainCh and receiving messages + blockSyncCh *p2p.Channel + // blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope + // messages that the reactor will consume in processBlockSyncCh and receiving messages // from the peer updates channel and other goroutines. We do this instead of directly - // sending on blockchainCh.Out to avoid race conditions in the case where other goroutines - // send Envelopes directly to the to blockchainCh.Out channel, since processBlockchainCh - // may close the blockchainCh.Out channel at the same time that other goroutines send to - // blockchainCh.Out. - blockchainOutBridgeCh chan p2p.Envelope - peerUpdates *p2p.PeerUpdates - closeCh chan struct{} + // sending on blockSyncCh.Out to avoid race conditions in the case where other goroutines + // send Envelopes directly to the to blockSyncCh.Out channel, since processBlockSyncCh + // may close the blockSyncCh.Out channel at the same time that other goroutines send to + // blockSyncCh.Out. + blockSyncOutBridgeCh chan p2p.Envelope + peerUpdates *p2p.PeerUpdates + closeCh chan struct{} requestsCh <-chan BlockRequest errorsCh <-chan peerError @@ -107,7 +94,7 @@ type Reactor struct { // stopping the p2p Channel(s). poolWG sync.WaitGroup - metrics *cons.Metrics + metrics *consensus.Metrics syncStartTime time.Time } @@ -119,10 +106,10 @@ func NewReactor( blockExec *sm.BlockExecutor, store *store.BlockStore, consReactor consensusReactor, - blockchainCh *p2p.Channel, + blockSyncCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, blockSync bool, - metrics *cons.Metrics, + metrics *consensus.Metrics, ) (*Reactor, error) { if state.LastBlockHeight != store.Height() { return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height()) @@ -137,23 +124,23 @@ func NewReactor( errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count. 
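[Editor's note: the blockSyncOutBridgeCh comment above describes a single-sender "bridge" pattern: only one goroutine ever sends on the outbound channel, so it can also be the one to close it. A minimal, self-contained Go sketch of that pattern follows; it is illustrative only, not part of this diff, and every name in it is hypothetical.]

package main

import "fmt"

// bridge is the sole sender on out. Producers send on in instead of on
// out directly, so when bridge returns and closes out there is no other
// goroutine left that could race a send against the close.
func bridge(in <-chan string, out chan<- string, done <-chan struct{}) {
	defer close(out) // safe: this goroutine is the only sender on out
	for {
		select {
		case env := <-in:
			out <- env
		case <-done:
			return
		}
	}
}

func main() {
	in := make(chan string)     // the bridge channel producers send on
	out := make(chan string)    // owned (and eventually closed) by bridge
	done := make(chan struct{}) // shutdown signal

	go bridge(in, out, done)

	// Any number of goroutines may send on in without touching out.
	go func() { in <- "status-request" }()

	fmt.Println(<-out) // prints: status-request
	close(done)        // bridge exits and closes out without racing anyone
}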
r := &Reactor{ - initialState: state, - blockExec: blockExec, - store: store, - pool: NewBlockPool(startHeight, requestsCh, errorsCh), - consReactor: consReactor, - blockSync: tmSync.NewBool(blockSync), - requestsCh: requestsCh, - errorsCh: errorsCh, - blockchainCh: blockchainCh, - blockchainOutBridgeCh: make(chan p2p.Envelope), - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), - metrics: metrics, - syncStartTime: time.Time{}, + initialState: state, + blockExec: blockExec, + store: store, + pool: NewBlockPool(startHeight, requestsCh, errorsCh), + consReactor: consReactor, + blockSync: tmsync.NewBool(blockSync), + requestsCh: requestsCh, + errorsCh: errorsCh, + blockSyncCh: blockSyncCh, + blockSyncOutBridgeCh: make(chan p2p.Envelope), + peerUpdates: peerUpdates, + closeCh: make(chan struct{}), + metrics: metrics, + syncStartTime: time.Time{}, } - r.BaseService = *service.NewBaseService(logger, "Blockchain", r) + r.BaseService = *service.NewBaseService(logger, "BlockSync", r) return r, nil } @@ -169,12 +156,14 @@ func (r *Reactor) OnStart() error { if err := r.pool.Start(); err != nil { return err } + r.poolWG.Add(1) + go r.requestRoutine() r.poolWG.Add(1) go r.poolRoutine(false) } - go r.processBlockchainCh() + go r.processBlockSyncCh() go r.processPeerUpdates() return nil @@ -199,7 +188,7 @@ func (r *Reactor) OnStop() { // Wait for all p2p Channels to be closed before returning. This ensures we // can easily reason about synchronization of all p2p Channels and ensure no // panics will occur. - <-r.blockchainCh.Done() + <-r.blockSyncCh.Done() <-r.peerUpdates.Done() } @@ -214,7 +203,7 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) return } - r.blockchainCh.Out <- p2p.Envelope{ + r.blockSyncCh.Out <- p2p.Envelope{ To: peerID, Message: &bcproto.BlockResponse{Block: blockProto}, } @@ -223,16 +212,16 @@ func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) } r.Logger.Info("peer requesting a block we do not have", "peer", peerID, "height", msg.Height) - r.blockchainCh.Out <- p2p.Envelope{ + r.blockSyncCh.Out <- p2p.Envelope{ To: peerID, Message: &bcproto.NoBlockResponse{Height: msg.Height}, } } -// handleBlockchainMessage handles envelopes sent from peers on the -// BlockchainChannel. It returns an error only if the Envelope.Message is unknown +// handleBlockSyncMessage handles envelopes sent from peers on the +// BlockSyncChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. 
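[Editor's note: the GetChannelDescriptor function added earlier in this file replaces the deleted package-level ChannelShims map, so callers now ask the reactor package for its channel configuration instead of reading a shared global. Below is a simplified, self-contained sketch of that call shape; the types and values are stand-ins, not the real p2p package.]

package main

import "fmt"

type ChannelID byte

// ChannelDescriptor mirrors the shape of the descriptor built above
// (fields trimmed to the ones this sketch uses).
type ChannelDescriptor struct {
	ID                  ChannelID
	Priority            int
	SendQueueCapacity   int
	RecvBufferCapacity  int
	RecvMessageCapacity int
}

// GetChannelDescriptor builds the descriptor on demand rather than
// exposing it as mutable package state.
func GetChannelDescriptor() *ChannelDescriptor {
	return &ChannelDescriptor{
		ID:                  0x40, // BlockSyncChannel
		Priority:            5,
		SendQueueCapacity:   1000,
		RecvBufferCapacity:  1024,
		RecvMessageCapacity: 1 << 20, // stand-in for MaxMsgSize
	}
}

func main() {
	// A node would hand this descriptor to its router when opening the
	// block sync channel; here we only show the call shape.
	desc := GetChannelDescriptor()
	fmt.Printf("channel 0x%x, priority %d\n", byte(desc.ID), desc.Priority)
}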
-func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error { +func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error { logger := r.Logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -249,7 +238,7 @@ func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error { r.pool.AddBlock(envelope.From, block, block.Size()) case *bcproto.StatusRequest: - r.blockchainCh.Out <- p2p.Envelope{ + r.blockSyncCh.Out <- p2p.Envelope{ To: envelope.From, Message: &bcproto.StatusResponse{ Height: r.store.Height(), @@ -288,8 +277,8 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err r.Logger.Debug("received message", "message", envelope.Message, "peer", envelope.From) switch chID { - case BlockchainChannel: - err = r.handleBlockchainMessage(envelope) + case BlockSyncChannel: + err = r.handleBlockSyncMessage(envelope) default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) @@ -298,30 +287,30 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err return err } -// processBlockchainCh initiates a blocking process where we listen for and handle -// envelopes on the BlockchainChannel and blockchainOutBridgeCh. Any error encountered during -// message execution will result in a PeerError being sent on the BlockchainChannel. +// processBlockSyncCh initiates a blocking process where we listen for and handle +// envelopes on the BlockSyncChannel and blockSyncOutBridgeCh. Any error encountered during +// message execution will result in a PeerError being sent on the BlockSyncChannel. // When the reactor is stopped, we will catch the signal and close the p2p Channel // gracefully. -func (r *Reactor) processBlockchainCh() { - defer r.blockchainCh.Close() +func (r *Reactor) processBlockSyncCh() { + defer r.blockSyncCh.Close() for { select { - case envelope := <-r.blockchainCh.In: - if err := r.handleMessage(r.blockchainCh.ID, envelope); err != nil { - r.Logger.Error("failed to process message", "ch_id", r.blockchainCh.ID, "envelope", envelope, "err", err) - r.blockchainCh.Error <- p2p.PeerError{ + case envelope := <-r.blockSyncCh.In: + if err := r.handleMessage(r.blockSyncCh.ID, envelope); err != nil { + r.Logger.Error("failed to process message", "ch_id", r.blockSyncCh.ID, "envelope", envelope, "err", err) + r.blockSyncCh.Error <- p2p.PeerError{ NodeID: envelope.From, Err: err, } } - case envelope := <-r.blockchainOutBridgeCh: - r.blockchainCh.Out <- envelope + case envelope := <-r.blockSyncOutBridgeCh: + r.blockSyncCh.Out <- envelope case <-r.closeCh: - r.Logger.Debug("stopped listening on blockchain channel; closing...") + r.Logger.Debug("stopped listening on block sync channel; closing...") return } @@ -340,7 +329,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { switch peerUpdate.Status { case p2p.PeerStatusUp: // send a status update the newly added peer - r.blockchainOutBridgeCh <- p2p.Envelope{ + r.blockSyncOutBridgeCh <- p2p.Envelope{ To: peerUpdate.NodeID, Message: &bcproto.StatusResponse{ Base: r.store.Base(), @@ -384,6 +373,9 @@ func (r *Reactor) SwitchToBlockSync(state sm.State) error { r.syncStartTime = time.Now() + r.poolWG.Add(1) + go r.requestRoutine() + r.poolWG.Add(1) go r.poolRoutine(true) @@ -394,7 +386,6 @@ func (r *Reactor) requestRoutine() { statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) defer statusUpdateTicker.Stop() - r.poolWG.Add(1) defer r.poolWG.Done() for { @@ -406,13 +397,13 @@ func (r 
*Reactor) requestRoutine() { return case request := <-r.requestsCh: - r.blockchainOutBridgeCh <- p2p.Envelope{ + r.blockSyncOutBridgeCh <- p2p.Envelope{ To: request.PeerID, Message: &bcproto.BlockRequest{Height: request.Height}, } case pErr := <-r.errorsCh: - r.blockchainCh.Error <- p2p.PeerError{ + r.blockSyncCh.Error <- p2p.PeerError{ NodeID: pErr.peerID, Err: pErr.err, } @@ -423,7 +414,7 @@ func (r *Reactor) requestRoutine() { go func() { defer r.poolWG.Done() - r.blockchainOutBridgeCh <- p2p.Envelope{ + r.blockSyncOutBridgeCh <- p2p.Envelope{ Broadcast: true, Message: &bcproto.StatusRequest{}, } @@ -455,8 +446,6 @@ func (r *Reactor) poolRoutine(stateSynced bool) { defer trySyncTicker.Stop() defer switchToConsensusTicker.Stop() - go r.requestRoutine() - defer r.poolWG.Done() FOR_LOOP: @@ -554,14 +543,14 @@ FOR_LOOP: // NOTE: We've already removed the peer's request, but we still need // to clean up the rest. peerID := r.pool.RedoRequest(first.Height) - r.blockchainCh.Error <- p2p.PeerError{ + r.blockSyncCh.Error <- p2p.PeerError{ NodeID: peerID, Err: err, } peerID2 := r.pool.RedoRequest(second.Height) if peerID2 != peerID { - r.blockchainCh.Error <- p2p.PeerError{ + r.blockSyncCh.Error <- p2p.PeerError{ NodeID: peerID2, Err: err, } @@ -605,6 +594,8 @@ FOR_LOOP: case <-r.closeCh: break FOR_LOOP + case <-r.pool.Quit(): + break FOR_LOOP } } } diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/reactor_test.go similarity index 79% rename from internal/blocksync/v0/reactor_test.go rename to internal/blocksync/reactor_test.go index e038b57af..6bca8d4a9 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/reactor_test.go @@ -1,4 +1,4 @@ -package v0 +package blocksync import ( "os" @@ -6,22 +6,23 @@ import ( "time" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) type reactorTestSuite struct { @@ -32,9 +33,9 @@ type reactorTestSuite struct { reactors map[types.NodeID]*Reactor app map[types.NodeID]proxy.AppConns - blockchainChannels map[types.NodeID]*p2p.Channel - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates + blockSyncChannels map[types.NodeID]*p2p.Channel + peerChans map[types.NodeID]chan p2p.PeerUpdate + peerUpdates map[types.NodeID]*p2p.PeerUpdates blockSync bool } @@ -53,19 +54,19 @@ func setup( "must specify at least one block height (nodes)") rts 
:= &reactorTestSuite{ - logger: log.TestingLogger().With("module", "blockchain", "testCase", t.Name()), - network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - nodes: make([]types.NodeID, 0, numNodes), - reactors: make(map[types.NodeID]*Reactor, numNodes), - app: make(map[types.NodeID]proxy.AppConns, numNodes), - blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), - blockSync: true, + logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()), + network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + nodes: make([]types.NodeID, 0, numNodes), + reactors: make(map[types.NodeID]*Reactor, numNodes), + app: make(map[types.NodeID]proxy.AppConns, numNodes), + blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + blockSync: true, } - chDesc := p2p.ChannelDescriptor{ID: byte(BlockchainChannel)} - rts.blockchainChannels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(bcproto.Message), int(chBuf)) + chDesc := &p2p.ChannelDescriptor{ID: BlockSyncChannel, MessageType: new(bcproto.Message)} + rts.blockSyncChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) i := 0 for nodeID := range rts.network.Nodes { @@ -97,7 +98,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, t.Helper() rts.nodes = append(rts.nodes, nodeID) - rts.app[nodeID] = proxy.NewAppConns(proxy.NewLocalClientCreator(&abci.BaseApplication{})) + rts.app[nodeID] = proxy.NewAppConns(abciclient.NewLocalCreator(&abci.BaseApplication{}), proxy.NopMetrics()) require.NoError(t, rts.app[nodeID].Start()) blockDB := dbm.NewMemDB() @@ -161,10 +162,10 @@ func (rts *reactorTestSuite) addNode(t *testing.T, blockExec, blockStore, nil, - rts.blockchainChannels[nodeID], + rts.blockSyncChannels[nodeID], rts.peerUpdates[nodeID], rts.blockSync, - cons.NopMetrics()) + consensus.NopMetrics()) require.NoError(t, err) require.NoError(t, rts.reactors[nodeID].Start()) @@ -181,10 +182,10 @@ func (rts *reactorTestSuite) start(t *testing.T) { } func TestReactor_AbruptDisconnect(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("block_sync_reactor_test") + defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(64) rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) @@ -216,10 +217,10 @@ func TestReactor_AbruptDisconnect(t *testing.T) { } func TestReactor_SyncTime(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("block_sync_reactor_test") + defer os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(101) rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) @@ -239,10 +240,10 @@ func TestReactor_SyncTime(t *testing.T) { } func TestReactor_NoBlockResponse(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("block_sync_reactor_test") + defer 
os.RemoveAll(cfg.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) maxBlockHeight := int64(65) rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0) @@ -286,11 +287,11 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // See: https://github.com/tendermint/tendermint/issues/6005 t.SkipNow() - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("block_sync_reactor_test") + defer os.RemoveAll(cfg.RootDir) maxBlockHeight := int64(48) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) + genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30) rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000) @@ -324,7 +325,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) { // // XXX: This causes a potential race condition. // See: https://github.com/tendermint/tendermint/issues/6005 - otherGenDoc, otherPrivVals := factory.RandGenesisDoc(config, 1, false, 30) + otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, false, 30) newNode := rts.network.MakeNode(t, p2ptest.NodeOptions{ MaxPeers: uint16(len(rts.nodes) + 1), MaxConnected: uint16(len(rts.nodes) + 1), diff --git a/internal/blocksync/v2/internal/behavior/doc.go b/internal/blocksync/v2/internal/behavior/doc.go deleted file mode 100644 index c4bd06cce..000000000 --- a/internal/blocksync/v2/internal/behavior/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -/* -Package Behavior provides a mechanism for reactors to report behavior of peers. - -Instead of a reactor calling the switch directly it will call the behavior module which will -handle the stoping and marking peer as good on behalf of the reactor. - -There are four different behaviors a reactor can report. - -1. bad message - -type badMessage struct { - explanation string -} - -This message will request the peer be stopped for an error - -2. message out of order - -type messageOutOfOrder struct { - explanation string -} - -This message will request the peer be stopped for an error - -3. consesnsus Vote - -type consensusVote struct { - explanation string -} - -This message will request the peer be marked as good - -4. block part - -type blockPart struct { - explanation string -} - -This message will request the peer be marked as good - -*/ -package behavior diff --git a/internal/blocksync/v2/internal/behavior/peer_behaviour.go b/internal/blocksync/v2/internal/behavior/peer_behaviour.go deleted file mode 100644 index 90948d888..000000000 --- a/internal/blocksync/v2/internal/behavior/peer_behaviour.go +++ /dev/null @@ -1,47 +0,0 @@ -package behavior - -import "github.com/tendermint/tendermint/types" - -// PeerBehavior is a struct describing a behavior a peer performed. -// `peerID` identifies the peer and reason characterizes the specific -// behavior performed by the peer. -type PeerBehavior struct { - peerID types.NodeID - reason interface{} -} - -type badMessage struct { - explanation string -} - -// BadMessage returns a badMessage PeerBehavior. -func BadMessage(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: badMessage{explanation}} -} - -type messageOutOfOrder struct { - explanation string -} - -// MessageOutOfOrder returns a messagOutOfOrder PeerBehavior. 
-func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}} -} - -type consensusVote struct { - explanation string -} - -// ConsensusVote returns a consensusVote PeerBehavior. -func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}} -} - -type blockPart struct { - explanation string -} - -// BlockPart returns blockPart PeerBehavior. -func BlockPart(peerID types.NodeID, explanation string) PeerBehavior { - return PeerBehavior{peerID: peerID, reason: blockPart{explanation}} -} diff --git a/internal/blocksync/v2/internal/behavior/reporter.go b/internal/blocksync/v2/internal/behavior/reporter.go deleted file mode 100644 index c150a98d5..000000000 --- a/internal/blocksync/v2/internal/behavior/reporter.go +++ /dev/null @@ -1,87 +0,0 @@ -package behavior - -import ( - "errors" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// Reporter provides an interface for reactors to report the behavior -// of peers synchronously to other components. -type Reporter interface { - Report(behavior PeerBehavior) error -} - -// SwitchReporter reports peer behavior to an internal Switch. -type SwitchReporter struct { - sw *p2p.Switch -} - -// NewSwitchReporter return a new SwitchReporter instance which wraps the Switch. -func NewSwitchReporter(sw *p2p.Switch) *SwitchReporter { - return &SwitchReporter{ - sw: sw, - } -} - -// Report reports the behavior of a peer to the Switch. -func (spbr *SwitchReporter) Report(behavior PeerBehavior) error { - peer := spbr.sw.Peers().Get(behavior.peerID) - if peer == nil { - return errors.New("peer not found") - } - - switch reason := behavior.reason.(type) { - case consensusVote, blockPart: - spbr.sw.MarkPeerAsGood(peer) - case badMessage: - spbr.sw.StopPeerForError(peer, reason.explanation) - case messageOutOfOrder: - spbr.sw.StopPeerForError(peer, reason.explanation) - default: - return errors.New("unknown reason reported") - } - - return nil -} - -// MockReporter is a concrete implementation of the Reporter -// interface used in reactor tests to ensure reactors report the correct -// behavior in manufactured scenarios. -type MockReporter struct { - mtx tmsync.RWMutex - pb map[types.NodeID][]PeerBehavior -} - -// NewMockReporter returns a Reporter which records all reported -// behaviors in memory. -func NewMockReporter() *MockReporter { - return &MockReporter{ - pb: map[types.NodeID][]PeerBehavior{}, - } -} - -// Report stores the PeerBehavior produced by the peer identified by peerID. -func (mpbr *MockReporter) Report(behavior PeerBehavior) error { - mpbr.mtx.Lock() - defer mpbr.mtx.Unlock() - mpbr.pb[behavior.peerID] = append(mpbr.pb[behavior.peerID], behavior) - - return nil -} - -// GetBehaviors returns all behaviors reported on the peer identified by peerID. 
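[Editor's note: the behavior package being deleted here reported peer misbehavior through a Reporter interface backed by the p2p Switch. In the rewritten reactor earlier in this diff, the same job is done by sending a p2p.PeerError value on the channel's Error stream. A toy, self-contained sketch of that replacement pattern follows; all names are hypothetical.]

package main

import (
	"errors"
	"fmt"
)

// PeerError mirrors the shape of the p2p.PeerError values the new
// reactor sends on blockSyncCh.Error instead of calling a Reporter.
type PeerError struct {
	NodeID string
	Err    error
}

func main() {
	errCh := make(chan PeerError, 1)

	// A reactor goroutine reports misbehavior as plain data on a channel...
	errCh <- PeerError{NodeID: "peer-1", Err: errors.New("bad block")}

	// ...and the p2p layer consumes it and disciplines the peer.
	pe := <-errCh
	fmt.Printf("stopping %s: %v\n", pe.NodeID, pe.Err)
}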
-func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior { - mpbr.mtx.RLock() - defer mpbr.mtx.RUnlock() - if items, ok := mpbr.pb[peerID]; ok { - result := make([]PeerBehavior, len(items)) - copy(result, items) - - return result - } - - return []PeerBehavior{} -} diff --git a/internal/blocksync/v2/internal/behavior/reporter_test.go b/internal/blocksync/v2/internal/behavior/reporter_test.go deleted file mode 100644 index 861a63df0..000000000 --- a/internal/blocksync/v2/internal/behavior/reporter_test.go +++ /dev/null @@ -1,205 +0,0 @@ -package behavior_test - -import ( - "sync" - "testing" - - bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/types" -) - -// TestMockReporter tests the MockReporter's ability to store reported -// peer behavior in memory indexed by the peerID. -func TestMockReporter(t *testing.T) { - var peerID types.NodeID = "MockPeer" - pr := bh.NewMockReporter() - - behaviors := pr.GetBehaviors(peerID) - if len(behaviors) != 0 { - t.Error("Expected to have no behaviors reported") - } - - badMessage := bh.BadMessage(peerID, "bad message") - if err := pr.Report(badMessage); err != nil { - t.Error(err) - } - behaviors = pr.GetBehaviors(peerID) - if len(behaviors) != 1 { - t.Error("Expected the peer have one reported behavior") - } - - if behaviors[0] != badMessage { - t.Error("Expected Bad Message to have been reported") - } -} - -type scriptItem struct { - peerID types.NodeID - behavior bh.PeerBehavior -} - -// equalBehaviors returns true if a and b contain the same PeerBehaviors with -// the same freequencies and otherwise false. -func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool { - aHistogram := map[bh.PeerBehavior]int{} - bHistogram := map[bh.PeerBehavior]int{} - - for _, behavior := range a { - aHistogram[behavior]++ - } - - for _, behavior := range b { - bHistogram[behavior]++ - } - - if len(aHistogram) != len(bHistogram) { - return false - } - - for _, behavior := range a { - if aHistogram[behavior] != bHistogram[behavior] { - return false - } - } - - for _, behavior := range b { - if bHistogram[behavior] != aHistogram[behavior] { - return false - } - } - - return true -} - -// TestEqualPeerBehaviors tests that equalBehaviors can tell that two slices -// of peer behaviors can be compared for the behaviors they contain and the -// freequencies that those behaviors occur. 
-func TestEqualPeerBehaviors(t *testing.T) { - var ( - peerID types.NodeID = "MockPeer" - consensusVote = bh.ConsensusVote(peerID, "voted") - blockPart = bh.BlockPart(peerID, "blocked") - equals = []struct { - left []bh.PeerBehavior - right []bh.PeerBehavior - }{ - // Empty sets - {[]bh.PeerBehavior{}, []bh.PeerBehavior{}}, - // Single behaviors - {[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{consensusVote}}, - // Equal Frequencies - {[]bh.PeerBehavior{consensusVote, consensusVote}, - []bh.PeerBehavior{consensusVote, consensusVote}}, - // Equal frequencies different orders - {[]bh.PeerBehavior{consensusVote, blockPart}, - []bh.PeerBehavior{blockPart, consensusVote}}, - } - unequals = []struct { - left []bh.PeerBehavior - right []bh.PeerBehavior - }{ - // Comparing empty sets to non empty sets - {[]bh.PeerBehavior{}, []bh.PeerBehavior{consensusVote}}, - // Different behaviors - {[]bh.PeerBehavior{consensusVote}, []bh.PeerBehavior{blockPart}}, - // Same behavior with different frequencies - {[]bh.PeerBehavior{consensusVote}, - []bh.PeerBehavior{consensusVote, consensusVote}}, - } - ) - - for _, test := range equals { - if !equalBehaviors(test.left, test.right) { - t.Errorf("expected %#v and %#v to be equal", test.left, test.right) - } - } - - for _, test := range unequals { - if equalBehaviors(test.left, test.right) { - t.Errorf("expected %#v and %#v to be unequal", test.left, test.right) - } - } -} - -// TestPeerBehaviorConcurrency constructs a scenario in which -// multiple goroutines are using the same MockReporter instance. -// This test reproduces the conditions in which MockReporter will -// be used within a Reactor `Receive` method tests to ensure thread safety. -func TestMockPeerBehaviorReporterConcurrency(t *testing.T) { - var ( - behaviorScript = []struct { - peerID types.NodeID - behaviors []bh.PeerBehavior - }{ - {"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}}, - {"2", []bh.PeerBehavior{bh.ConsensusVote("2", ""), bh.ConsensusVote("2", ""), bh.ConsensusVote("2", "")}}, - { - "3", - []bh.PeerBehavior{bh.BlockPart("3", ""), - bh.ConsensusVote("3", ""), - bh.BlockPart("3", ""), - bh.ConsensusVote("3", "")}}, - { - "4", - []bh.PeerBehavior{bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", ""), - bh.ConsensusVote("4", "")}}, - { - "5", - []bh.PeerBehavior{bh.BlockPart("5", ""), - bh.ConsensusVote("5", ""), - bh.BlockPart("5", ""), - bh.ConsensusVote("5", "")}}, - } - ) - - var receiveWg sync.WaitGroup - pr := bh.NewMockReporter() - scriptItems := make(chan scriptItem) - done := make(chan int) - numConsumers := 3 - for i := 0; i < numConsumers; i++ { - receiveWg.Add(1) - go func() { - defer receiveWg.Done() - for { - select { - case pb := <-scriptItems: - if err := pr.Report(pb.behavior); err != nil { - t.Error(err) - } - case <-done: - return - } - } - }() - } - - var sendingWg sync.WaitGroup - sendingWg.Add(1) - go func() { - defer sendingWg.Done() - for _, item := range behaviorScript { - for _, reason := range item.behaviors { - scriptItems <- scriptItem{item.peerID, reason} - } - } - }() - - sendingWg.Wait() - - for i := 0; i < numConsumers; i++ { - done <- 1 - } - - receiveWg.Wait() - - for _, items := range behaviorScript { - reported := pr.GetBehaviors(items.peerID) - if !equalBehaviors(reported, items.behaviors) { - t.Errorf("expected peer %s to have behaved \nExpected: %#v \nGot %#v \n", - items.peerID, items.behaviors, reported) - } - } -} diff --git a/internal/blocksync/v2/io.go b/internal/blocksync/v2/io.go deleted file mode 100644 
index 743428516..000000000 --- a/internal/blocksync/v2/io.go +++ /dev/null @@ -1,187 +0,0 @@ -package v2 - -import ( - "errors" - - "github.com/gogo/protobuf/proto" - "github.com/tendermint/tendermint/internal/p2p" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -var ( - errPeerQueueFull = errors.New("peer queue full") -) - -type iIO interface { - sendBlockRequest(peer p2p.Peer, height int64) error - sendBlockToPeer(block *types.Block, peer p2p.Peer) error - sendBlockNotFound(height int64, peer p2p.Peer) error - sendStatusResponse(base, height int64, peer p2p.Peer) error - - sendStatusRequest(peer p2p.Peer) error - broadcastStatusRequest() error - - trySwitchToConsensus(state state.State, skipWAL bool) bool -} - -type switchIO struct { - sw *p2p.Switch -} - -func newSwitchIo(sw *p2p.Switch) *switchIO { - return &switchIO{ - sw: sw, - } -} - -const ( - // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) - BlockchainChannel = byte(0x40) -) - -type consensusReactor interface { - // for when we switch from blockchain reactor and block sync to - // the consensus machine - SwitchToConsensus(state state.State, skipWAL bool) -} - -func (sio *switchIO) sendBlockRequest(peer p2p.Peer, height int64) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_BlockRequest{ - BlockRequest: &bcproto.BlockRequest{ - Height: height, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - queued := peer.TrySend(BlockchainChannel, msgBytes) - if !queued { - return errPeerQueueFull - } - return nil -} - -func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusResponse{ - StatusResponse: &bcproto.StatusResponse{ - Height: height, - Base: base, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error { - if block == nil { - panic("trying to send nil block") - } - - bpb, err := block.ToProto() - if err != nil { - return err - } - - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_BlockResponse{ - BlockResponse: &bcproto.BlockResponse{ - Block: bpb, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) sendBlockNotFound(height int64, peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_NoBlockResponse{ - NoBlockResponse: &bcproto.NoBlockResponse{ - Height: height, - }, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) bool { - conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor) - if ok { - conR.SwitchToConsensus(state, skipWAL) - } - return ok -} - -func (sio *switchIO) sendStatusRequest(peer p2p.Peer) error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusRequest{ - StatusRequest: &bcproto.StatusRequest{}, - }, - } - - msgBytes, err 
:= proto.Marshal(msgProto) - if err != nil { - return err - } - - if queued := peer.TrySend(BlockchainChannel, msgBytes); !queued { - return errPeerQueueFull - } - - return nil -} - -func (sio *switchIO) broadcastStatusRequest() error { - msgProto := &bcproto.Message{ - Sum: &bcproto.Message_StatusRequest{ - StatusRequest: &bcproto.StatusRequest{}, - }, - } - - msgBytes, err := proto.Marshal(msgProto) - if err != nil { - return err - } - - // XXX: maybe we should use an io specific peer list here - sio.sw.Broadcast(BlockchainChannel, msgBytes) - - return nil -} diff --git a/internal/blocksync/v2/metrics.go b/internal/blocksync/v2/metrics.go deleted file mode 100644 index c68ec6447..000000000 --- a/internal/blocksync/v2/metrics.go +++ /dev/null @@ -1,125 +0,0 @@ -package v2 - -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) - -const ( - // MetricsSubsystem is a subsystem shared by all metrics exposed by this - // package. - MetricsSubsystem = "blockchain" -) - -// Metrics contains metrics exposed by this package. -type Metrics struct { - // events_in - EventsIn metrics.Counter - // events_in - EventsHandled metrics.Counter - // events_out - EventsOut metrics.Counter - // errors_in - ErrorsIn metrics.Counter - // errors_handled - ErrorsHandled metrics.Counter - // errors_out - ErrorsOut metrics.Counter - // events_shed - EventsShed metrics.Counter - // events_sent - EventsSent metrics.Counter - // errors_sent - ErrorsSent metrics.Counter - // errors_shed - ErrorsShed metrics.Counter -} - -// PrometheusMetrics returns metrics for in and out events, errors, etc. handled by routines. -// Can we burn in the routine name here? 
-func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { - labels := []string{} - for i := 0; i < len(labelsAndValues); i += 2 { - labels = append(labels, labelsAndValues[i]) - } - return &Metrics{ - EventsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_in", - Help: "Events read from the channel.", - }, labels).With(labelsAndValues...), - EventsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_handled", - Help: "Events handled", - }, labels).With(labelsAndValues...), - EventsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_out", - Help: "Events output from routine.", - }, labels).With(labelsAndValues...), - ErrorsIn: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_in", - Help: "Errors read from the channel.", - }, labels).With(labelsAndValues...), - ErrorsHandled: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_handled", - Help: "Errors handled.", - }, labels).With(labelsAndValues...), - ErrorsOut: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_out", - Help: "Errors output from routine.", - }, labels).With(labelsAndValues...), - ErrorsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_sent", - Help: "Errors sent to routine.", - }, labels).With(labelsAndValues...), - ErrorsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "errors_shed", - Help: "Errors dropped from sending.", - }, labels).With(labelsAndValues...), - EventsSent: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_sent", - Help: "Events sent to routine.", - }, labels).With(labelsAndValues...), - EventsShed: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ - Namespace: namespace, - Subsystem: MetricsSubsystem, - Name: "events_shed", - Help: "Events dropped from sending.", - }, labels).With(labelsAndValues...), - } -} - -// NopMetrics returns no-op Metrics. 
-func NopMetrics() *Metrics { - return &Metrics{ - EventsIn: discard.NewCounter(), - EventsHandled: discard.NewCounter(), - EventsOut: discard.NewCounter(), - ErrorsIn: discard.NewCounter(), - ErrorsHandled: discard.NewCounter(), - ErrorsOut: discard.NewCounter(), - EventsShed: discard.NewCounter(), - EventsSent: discard.NewCounter(), - ErrorsSent: discard.NewCounter(), - ErrorsShed: discard.NewCounter(), - } -} diff --git a/internal/blocksync/v2/processor.go b/internal/blocksync/v2/processor.go deleted file mode 100644 index b448e7d8a..000000000 --- a/internal/blocksync/v2/processor.go +++ /dev/null @@ -1,193 +0,0 @@ -package v2 - -import ( - "fmt" - - tmState "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -// Events generated by the processor: -// block execution failure, event will indicate the peer(s) that caused the error -type pcBlockVerificationFailure struct { - priorityNormal - height int64 - firstPeerID types.NodeID - secondPeerID types.NodeID -} - -func (e pcBlockVerificationFailure) String() string { - return fmt.Sprintf("pcBlockVerificationFailure{%d 1st peer: %v, 2nd peer: %v}", - e.height, e.firstPeerID, e.secondPeerID) -} - -// successful block execution -type pcBlockProcessed struct { - priorityNormal - height int64 - peerID types.NodeID -} - -func (e pcBlockProcessed) String() string { - return fmt.Sprintf("pcBlockProcessed{%d peer: %v}", e.height, e.peerID) -} - -// processor has finished -type pcFinished struct { - priorityNormal - blocksSynced int - tmState tmState.State -} - -func (p pcFinished) Error() string { - return "finished" -} - -type queueItem struct { - block *types.Block - peerID types.NodeID -} - -type blockQueue map[int64]queueItem - -type pcState struct { - // blocks waiting to be processed - queue blockQueue - - // draining indicates that the next rProcessBlock event with a queue miss constitutes completion - draining bool - - // the number of blocks successfully synced by the processor - blocksSynced int - - // the processorContext which contains the processor dependencies - context processorContext -} - -func (state *pcState) String() string { - return fmt.Sprintf("height: %d queue length: %d draining: %v blocks synced: %d", - state.height(), len(state.queue), state.draining, state.blocksSynced) -} - -// newPcState returns a pcState initialized with the last verified block enqueued -func newPcState(context processorContext) *pcState { - return &pcState{ - queue: blockQueue{}, - draining: false, - blocksSynced: 0, - context: context, - } -} - -// nextTwo returns the next two unverified blocks -func (state *pcState) nextTwo() (queueItem, queueItem, error) { - if first, ok := state.queue[state.height()+1]; ok { - if second, ok := state.queue[state.height()+2]; ok { - return first, second, nil - } - } - return queueItem{}, queueItem{}, fmt.Errorf("not found") -} - -// synced returns true when at most the last verified block remains in the queue -func (state *pcState) synced() bool { - return len(state.queue) <= 1 -} - -func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) { - if item, ok := state.queue[height]; ok { - panic(fmt.Sprintf( - "duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)", - height, block.Hash(), peerID, item.block.Hash(), item.peerID)) - } - - state.queue[height] = queueItem{block: block, peerID: peerID} -} - -func (state *pcState) height() int64 { - return state.context.tmState().LastBlockHeight -} - -// purgePeer moves all 
unprocessed blocks from the queue -func (state *pcState) purgePeer(peerID types.NodeID) { - // what if height is less than state.height? - for height, item := range state.queue { - if item.peerID == peerID { - delete(state.queue, height) - } - } -} - -// handle processes FSM events -func (state *pcState) handle(event Event) (Event, error) { - switch event := event.(type) { - case bcResetState: - state.context.setState(event.state) - return noOp, nil - - case scFinishedEv: - if state.synced() { - return pcFinished{tmState: state.context.tmState(), blocksSynced: state.blocksSynced}, nil - } - state.draining = true - return noOp, nil - - case scPeerError: - state.purgePeer(event.peerID) - return noOp, nil - - case scBlockReceived: - if event.block == nil { - return noOp, nil - } - - // enqueue block if height is higher than state height, else ignore it - if event.block.Height > state.height() { - state.enqueue(event.peerID, event.block, event.block.Height) - } - return noOp, nil - - case rProcessBlock: - tmState := state.context.tmState() - firstItem, secondItem, err := state.nextTwo() - if err != nil { - if state.draining { - return pcFinished{tmState: tmState, blocksSynced: state.blocksSynced}, nil - } - return noOp, nil - } - - var ( - first, second = firstItem.block, secondItem.block - firstParts = first.MakePartSet(types.BlockPartSizeBytes) - firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} - ) - - // verify if +second+ last commit "confirms" +first+ block - err = state.context.verifyCommit(tmState.ChainID, firstID, first.Height, second.LastCommit) - if err != nil { - state.purgePeer(firstItem.peerID) - if firstItem.peerID != secondItem.peerID { - state.purgePeer(secondItem.peerID) - } - return pcBlockVerificationFailure{ - height: first.Height, firstPeerID: firstItem.peerID, secondPeerID: secondItem.peerID}, - nil - } - - state.context.saveBlock(first, firstParts, second.LastCommit) - - if err := state.context.applyBlock(firstID, first); err != nil { - panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) - } - - state.context.recordConsMetrics(first) - - delete(state.queue, first.Height) - state.blocksSynced++ - - return pcBlockProcessed{height: first.Height, peerID: firstItem.peerID}, nil - } - - return noOp, nil -} diff --git a/internal/blocksync/v2/processor_context.go b/internal/blocksync/v2/processor_context.go deleted file mode 100644 index bc6852565..000000000 --- a/internal/blocksync/v2/processor_context.go +++ /dev/null @@ -1,112 +0,0 @@ -package v2 - -import ( - "fmt" - - cons "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -type processorContext interface { - applyBlock(blockID types.BlockID, block *types.Block) error - verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error - saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) - tmState() state.State - setState(state.State) - recordConsMetrics(block *types.Block) -} - -type pContext struct { - store blockStore - applier blockApplier - state state.State - metrics *cons.Metrics -} - -func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.Metrics) *pContext { - return &pContext{ - store: st, - applier: ex, - state: s, - metrics: m, - } -} - -func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error { - newState, err := 
pc.applier.ApplyBlock(pc.state, blockID, block) - pc.state = newState - return err -} - -func (pc pContext) tmState() state.State { - return pc.state -} - -func (pc *pContext) setState(state state.State) { - pc.state = state -} - -func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit) -} - -func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - pc.store.SaveBlock(block, blockParts, seenCommit) -} - -func (pc *pContext) recordConsMetrics(block *types.Block) { - pc.metrics.RecordConsMetrics(block) -} - -type mockPContext struct { - applicationBL []int64 - verificationBL []int64 - state state.State -} - -func newMockProcessorContext( - state state.State, - verificationBlackList []int64, - applicationBlackList []int64) *mockPContext { - return &mockPContext{ - applicationBL: applicationBlackList, - verificationBL: verificationBlackList, - state: state, - } -} - -func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error { - for _, h := range mpc.applicationBL { - if h == block.Height { - return fmt.Errorf("generic application error") - } - } - mpc.state.LastBlockHeight = block.Height - return nil -} - -func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - for _, h := range mpc.verificationBL { - if h == height { - return fmt.Errorf("generic verification error") - } - } - return nil -} - -func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - -} - -func (mpc *mockPContext) setState(state state.State) { - mpc.state = state -} - -func (mpc *mockPContext) tmState() state.State { - return mpc.state -} - -func (mpc *mockPContext) recordConsMetrics(block *types.Block) { - -} diff --git a/internal/blocksync/v2/processor_test.go b/internal/blocksync/v2/processor_test.go deleted file mode 100644 index f7d51112b..000000000 --- a/internal/blocksync/v2/processor_test.go +++ /dev/null @@ -1,305 +0,0 @@ -package v2 - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - tmState "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability. -type pcBlock struct { - pid string - height int64 -} - -// params is a test structure used to create processor state. -type params struct { - height int64 - items []pcBlock - blocksSynced int - verBL []int64 - appBL []int64 - draining bool -} - -// makePcBlock makes an empty block. -func makePcBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} -} - -// makeState takes test parameters and creates a specific processor state. 
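The `pContext`/`mockPContext` pair above is the seam that keeps these tests hermetic: the processor only ever sees the `processorContext` interface, so tests can substitute an implementation that fails on blacklisted heights. A condensed sketch of the same idea, using illustrative names rather than the deleted API:

```go
package main

import "fmt"

// applier stands in for the deleted processorContext: the processor only
// needs "apply this height", so a test double can fail on chosen heights.
type applier interface {
	applyBlock(height int64) error
}

// mockApplier mirrors mockPContext's blacklist style (illustrative only).
type mockApplier struct{ failAt map[int64]bool }

func (m *mockApplier) applyBlock(h int64) error {
	if m.failAt[h] {
		return fmt.Errorf("generic application error")
	}
	return nil
}

func main() {
	var a applier = &mockApplier{failAt: map[int64]bool{2: true}}
	for h := int64(1); h <= 3; h++ {
		fmt.Printf("height %d: %v\n", h, a.applyBlock(h))
	}
}
```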
-func makeState(p *params) *pcState { - var ( - tmState = tmState.State{LastBlockHeight: p.height} - context = newMockProcessorContext(tmState, p.verBL, p.appBL) - ) - state := newPcState(context) - - for _, item := range p.items { - state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height) - } - - state.blocksSynced = p.blocksSynced - state.draining = p.draining - return state -} - -func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived { - return scBlockReceived{ - peerID: peerID, - block: makePcBlock(height), - } -} - -type pcFsmMakeStateValues struct { - currentState *params - event Event - wantState *params - wantNextEvent Event - wantErr error - wantPanic bool -} - -type testFields struct { - name string - steps []pcFsmMakeStateValues -} - -func executeProcessorTests(t *testing.T, tests []testFields) { - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var state *pcState - for _, step := range tt.steps { - defer func() { - r := recover() - if (r != nil) != step.wantPanic { - t.Errorf("recover = %v, wantPanic = %v", r, step.wantPanic) - } - }() - - // First step must always initialize the currentState as state. - if step.currentState != nil { - state = makeState(step.currentState) - } - if state == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := state.handle(step.event) - t.Log(state) - assert.Equal(t, step.wantErr, err) - assert.Equal(t, makeState(step.wantState), state) - assert.Equal(t, step.wantNextEvent, nextEvent) - // Next step may use the wantedState as their currentState. - state = makeState(step.wantState) - } - }) - } -} - -func TestRProcessPeerError(t *testing.T) { - tests := []testFields{ - { - name: "error for existing peer", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P2"}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, - wantNextEvent: noOp, - }, - }, - }, - { - name: "error for unknown peer", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - event: scPeerError{peerID: "P3"}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, - wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestPcBlockResponse(t *testing.T) { - tests := []testFields{ - { - name: "add one block", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: mBlockResponse("P1", 1), - wantState: ¶ms{items: []pcBlock{{"P1", 1}}}, wantNextEvent: noOp, - }, - }, - }, - - { - name: "add two blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: mBlockResponse("P1", 3), - wantState: ¶ms{items: []pcBlock{{"P1", 3}}}, wantNextEvent: noOp, - }, - { // use previous wantState as currentState, - event: mBlockResponse("P1", 4), - wantState: ¶ms{items: []pcBlock{{"P1", 3}, {"P1", 4}}}, wantNextEvent: noOp, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockSuccess(t *testing.T) { - tests := []testFields{ - { - name: "noop - no blocks over current height", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{}, event: rProcessBlock{}, - wantState: ¶ms{}, wantNextEvent: noOp, - }, - }, - }, - { - name: "noop - high new blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, event: rProcessBlock{}, - wantState: ¶ms{height: 5, items: []pcBlock{{"P1", 30}, {"P2", 31}}}, wantNextEvent: noOp, - }, - }, - }, - { - name: "blocks H+1 and H+2 
present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}}, event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}}, blocksSynced: 1}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present after draining", - steps: []pcFsmMakeStateValues{ - { // some contiguous blocks - on stop check draining is set - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}}, - event: scFinishedEv{}, - wantState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P1", 4}}, draining: true}, - wantNextEvent: noOp, - }, - { - event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcBlockProcessed{height: 1, peerID: "P1"}, - }, - { // finish when H+1 or/and H+2 are missing - event: rProcessBlock{}, - wantState: ¶ms{height: 1, items: []pcBlock{{"P2", 2}, {"P1", 4}}, blocksSynced: 1, draining: true}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 1}, blocksSynced: 1}, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestRProcessBlockFailures(t *testing.T) { - tests := []testFields{ - { - name: "blocks H+1 and H+2 present from different peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, verBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P2"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peer - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}}, appBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from same peers - H+1 verification fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 0, items: []pcBlock{{"P1", 1}, {"P1", 2}, {"P2", 3}}, - verBL: []int64{1}}, event: rProcessBlock{}, - wantState: ¶ms{height: 0, items: []pcBlock{{"P2", 3}}, verBL: []int64{1}}, - wantNextEvent: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}, - }, - }, - }, - { - name: "blocks H+1 and H+2 present from different peers - H+1 applyBlock fails ", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{items: []pcBlock{{"P1", 1}, {"P2", 2}, {"P2", 3}}, appBL: []int64{1}}, - event: rProcessBlock{}, - wantState: ¶ms{items: []pcBlock{{"P2", 3}}, appBL: []int64{1}}, wantPanic: true, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} - -func TestScFinishedEv(t *testing.T) { - tests := []testFields{ - { - name: "no blocks", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - name: "maxHeight+1 block present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{{"P1", 101}}, blocksSynced: 100}, - wantNextEvent: pcFinished{tmState: tmState.State{LastBlockHeight: 100}, blocksSynced: 100}, - }, - }, - }, - { - 
name: "more blocks present", - steps: []pcFsmMakeStateValues{ - { - currentState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100}, event: scFinishedEv{}, - wantState: ¶ms{height: 100, items: []pcBlock{ - {"P1", 101}, {"P1", 102}}, blocksSynced: 100, draining: true}, - wantNextEvent: noOp, - wantErr: nil, - }, - }, - }, - } - - executeProcessorTests(t, tests) -} diff --git a/internal/blocksync/v2/reactor.go b/internal/blocksync/v2/reactor.go deleted file mode 100644 index caa5d73f0..000000000 --- a/internal/blocksync/v2/reactor.go +++ /dev/null @@ -1,643 +0,0 @@ -package v2 - -import ( - "errors" - "fmt" - "time" - - proto "github.com/gogo/protobuf/proto" - - bc "github.com/tendermint/tendermint/internal/blocksync" - "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - cons "github.com/tendermint/tendermint/internal/consensus" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/sync" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -const ( - // chBufferSize is the buffer size of all event channels. - chBufferSize int = 1000 -) - -type blockStore interface { - LoadBlock(height int64) *types.Block - SaveBlock(*types.Block, *types.PartSet, *types.Commit) - Base() int64 - Height() int64 -} - -// BlockchainReactor handles block sync protocol. -type BlockchainReactor struct { - p2p.BaseReactor - - blockSync *sync.AtomicBool // enable block sync on start when it's been Set - stateSynced bool // set to true when SwitchToBlockSync is called by state sync - scheduler *Routine - processor *Routine - logger log.Logger - - mtx tmsync.RWMutex - maxPeerHeight int64 - syncHeight int64 - events chan Event // non-nil during a block sync - - reporter behavior.Reporter - io iIO - store blockStore - - syncStartTime time.Time - syncStartHeight int64 - lastSyncRate float64 // # blocks sync per sec base on the last 100 blocks -} - -type blockApplier interface { - ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error) -} - -// XXX: unify naming in this package around tmState -func newReactor(state state.State, store blockStore, reporter behavior.Reporter, - blockApplier blockApplier, blockSync bool, metrics *cons.Metrics) *BlockchainReactor { - initHeight := state.LastBlockHeight + 1 - if initHeight == 1 { - initHeight = state.InitialHeight - } - scheduler := newScheduler(initHeight, time.Now()) - pContext := newProcessorContext(store, blockApplier, state, metrics) - // TODO: Fix naming to just newProcesssor - // newPcState requires a processorContext - processor := newPcState(pContext) - - return &BlockchainReactor{ - scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize), - processor: newRoutine("processor", processor.handle, chBufferSize), - store: store, - reporter: reporter, - logger: log.NewNopLogger(), - blockSync: sync.NewBool(blockSync), - syncStartHeight: initHeight, - syncStartTime: time.Time{}, - lastSyncRate: 0, - } -} - -// NewBlockchainReactor creates a new reactor instance. 
-func NewBlockchainReactor( - state state.State, - blockApplier blockApplier, - store blockStore, - blockSync bool, - metrics *cons.Metrics) *BlockchainReactor { - reporter := behavior.NewMockReporter() - return newReactor(state, store, reporter, blockApplier, blockSync, metrics) -} - -// SetSwitch implements Reactor interface. -func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) { - r.Switch = sw - if sw != nil { - r.io = newSwitchIo(sw) - } else { - r.io = nil - } -} - -func (r *BlockchainReactor) setMaxPeerHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - if height > r.maxPeerHeight { - r.maxPeerHeight = height - } -} - -func (r *BlockchainReactor) setSyncHeight(height int64) { - r.mtx.Lock() - defer r.mtx.Unlock() - r.syncHeight = height -} - -// SyncHeight returns the height to which the BlockchainReactor has synced. -func (r *BlockchainReactor) SyncHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.syncHeight -} - -// SetLogger sets the logger of the reactor. -func (r *BlockchainReactor) SetLogger(logger log.Logger) { - r.logger = logger - r.scheduler.setLogger(logger) - r.processor.setLogger(logger) -} - -// Start implements cmn.Service interface -func (r *BlockchainReactor) Start() error { - r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch) - if r.blockSync.IsSet() { - err := r.startSync(nil) - if err != nil { - return fmt.Errorf("failed to start block sync: %w", err) - } - } - return nil -} - -// startSync begins a block sync, signaled by r.events being non-nil. If state is non-nil, -// the scheduler and processor is updated with this state on startup. -func (r *BlockchainReactor) startSync(state *state.State) error { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - return errors.New("block sync already in progress") - } - r.events = make(chan Event, chBufferSize) - go r.scheduler.start() - go r.processor.start() - if state != nil { - <-r.scheduler.ready() - <-r.processor.ready() - r.scheduler.send(bcResetState{state: *state}) - r.processor.send(bcResetState{state: *state}) - } - go r.demux(r.events) - return nil -} - -// endSync ends a block sync -func (r *BlockchainReactor) endSync() { - r.mtx.Lock() - defer r.mtx.Unlock() - if r.events != nil { - close(r.events) - } - r.events = nil - r.scheduler.stop() - r.processor.stop() -} - -// SwitchToBlockSync is called by the state sync reactor when switching to block sync. 
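`SwitchToBlockSync` (below) funnels into the same `startSync` shown above; note how `startSync` waits on each routine's `ready()` channel before sending `bcResetState`, so the goroutines are guaranteed to be consuming their queues before the first event is enqueued. That handshake in isolation (illustrative types, not the deleted API):

```go
package main

import "fmt"

type worker struct {
	rdy  chan struct{}
	in   chan string
	done chan struct{}
}

func (w *worker) start() {
	close(w.rdy) // signal readiness before consuming, as Routine.start does
	for msg := range w.in {
		fmt.Println("handled", msg)
	}
	close(w.done)
}

func main() {
	w := &worker{rdy: make(chan struct{}), in: make(chan string, 1), done: make(chan struct{})}
	go w.start()
	<-w.rdy // mirrors <-r.scheduler.ready()
	w.in <- "bcResetState"
	close(w.in)
	<-w.done
}
```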
-func (r *BlockchainReactor) SwitchToBlockSync(state state.State) error { - r.stateSynced = true - state = state.Copy() - - err := r.startSync(&state) - if err == nil { - r.syncStartTime = time.Now() - } - - return err -} - -// reactor generated ticker events: -// ticker for cleaning peers -type rTryPrunePeer struct { - priorityHigh - time time.Time -} - -func (e rTryPrunePeer) String() string { - return fmt.Sprintf("rTryPrunePeer{%v}", e.time) -} - -// ticker event for scheduling block requests -type rTrySchedule struct { - priorityHigh - time time.Time -} - -func (e rTrySchedule) String() string { - return fmt.Sprintf("rTrySchedule{%v}", e.time) -} - -// ticker for block processing -type rProcessBlock struct { - priorityNormal -} - -func (e rProcessBlock) String() string { - return "rProcessBlock" -} - -// reactor generated events based on blockchain related messages from peers: -// blockResponse message received from a peer -type bcBlockResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - size int64 - block *types.Block -} - -func (resp bcBlockResponse) String() string { - return fmt.Sprintf("bcBlockResponse{%d#%X (size: %d bytes) from %v at %v}", - resp.block.Height, resp.block.Hash(), resp.size, resp.peerID, resp.time) -} - -// blockNoResponse message received from a peer -type bcNoBlockResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - height int64 -} - -func (resp bcNoBlockResponse) String() string { - return fmt.Sprintf("bcNoBlockResponse{%v has no block at height %d at %v}", - resp.peerID, resp.height, resp.time) -} - -// statusResponse message received from a peer -type bcStatusResponse struct { - priorityNormal - time time.Time - peerID types.NodeID - base int64 - height int64 -} - -func (resp bcStatusResponse) String() string { - return fmt.Sprintf("bcStatusResponse{%v is at height %d (base: %d) at %v}", - resp.peerID, resp.height, resp.base, resp.time) -} - -// new peer is connected -type bcAddNewPeer struct { - priorityNormal - peerID types.NodeID -} - -func (resp bcAddNewPeer) String() string { - return fmt.Sprintf("bcAddNewPeer{%v}", resp.peerID) -} - -// existing peer is removed -type bcRemovePeer struct { - priorityHigh - peerID types.NodeID - reason interface{} -} - -func (resp bcRemovePeer) String() string { - return fmt.Sprintf("bcRemovePeer{%v due to %v}", resp.peerID, resp.reason) -} - -// resets the scheduler and processor state, e.g. following a switch from state syncing -type bcResetState struct { - priorityHigh - state state.State -} - -func (e bcResetState) String() string { - return fmt.Sprintf("bcResetState{%v}", e.state) -} - -// Takes the channel as a parameter to avoid race conditions on r.events. 
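The comment above is worth unpacking: `endSync` sets `r.events` to nil under the mutex, so a long-lived goroutine must not re-read that field. Capturing the channel value once and passing it as a parameter gives the reader an immutable view. A minimal sketch of the pattern, assuming illustrative types:

```go
package main

import (
	"fmt"
	"sync"
)

type reactor struct {
	mtx    sync.Mutex
	events chan string
	done   chan struct{}
}

func (r *reactor) startSync() {
	r.mtx.Lock()
	r.events = make(chan string, 1)
	ch := r.events // capture once, while holding the lock
	r.mtx.Unlock()
	go r.demux(ch)
}

// demux never touches r.events: its view of the channel cannot be
// invalidated by endSync.
func (r *reactor) demux(events <-chan string) {
	for ev := range events {
		fmt.Println("event:", ev)
	}
	close(r.done)
}

func (r *reactor) endSync() {
	r.mtx.Lock()
	close(r.events)
	r.events = nil // safe: demux holds its own reference
	r.mtx.Unlock()
}

func main() {
	r := &reactor{done: make(chan struct{})}
	r.startSync()
	r.mtx.Lock()
	r.events <- "bcAddNewPeer"
	r.mtx.Unlock()
	r.endSync()
	<-r.done
}
```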
-func (r *BlockchainReactor) demux(events <-chan Event) { - var lastHundred = time.Now() - - var ( - processBlockFreq = 20 * time.Millisecond - doProcessBlockCh = make(chan struct{}, 1) - doProcessBlockTk = time.NewTicker(processBlockFreq) - ) - defer doProcessBlockTk.Stop() - - var ( - prunePeerFreq = 1 * time.Second - doPrunePeerCh = make(chan struct{}, 1) - doPrunePeerTk = time.NewTicker(prunePeerFreq) - ) - defer doPrunePeerTk.Stop() - - var ( - scheduleFreq = 20 * time.Millisecond - doScheduleCh = make(chan struct{}, 1) - doScheduleTk = time.NewTicker(scheduleFreq) - ) - defer doScheduleTk.Stop() - - var ( - statusFreq = 10 * time.Second - doStatusCh = make(chan struct{}, 1) - doStatusTk = time.NewTicker(statusFreq) - ) - defer doStatusTk.Stop() - doStatusCh <- struct{}{} // immediately broadcast to get status of existing peers - - // Memoize the scSchedulerFail error to avoid printing it every scheduleFreq. - var scSchedulerFailErr error - - // XXX: Extract timers to make testing atemporal - for { - select { - // Pacers: send at most per frequency but don't saturate - case <-doProcessBlockTk.C: - select { - case doProcessBlockCh <- struct{}{}: - default: - } - case <-doPrunePeerTk.C: - select { - case doPrunePeerCh <- struct{}{}: - default: - } - case <-doScheduleTk.C: - select { - case doScheduleCh <- struct{}{}: - default: - } - case <-doStatusTk.C: - select { - case doStatusCh <- struct{}{}: - default: - } - - // Tickers: perform tasks periodically - case <-doScheduleCh: - r.scheduler.send(rTrySchedule{time: time.Now()}) - case <-doPrunePeerCh: - r.scheduler.send(rTryPrunePeer{time: time.Now()}) - case <-doProcessBlockCh: - r.processor.send(rProcessBlock{}) - case <-doStatusCh: - if err := r.io.broadcastStatusRequest(); err != nil { - r.logger.Error("Error broadcasting status request", "err", err) - } - - // Events from peers. Closing the channel signals event loop termination. - case event, ok := <-events: - if !ok { - r.logger.Info("Stopping event processing") - return - } - switch event := event.(type) { - case bcStatusResponse: - r.setMaxPeerHeight(event.height) - r.scheduler.send(event) - case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse: - r.scheduler.send(event) - default: - r.logger.Error("Received unexpected event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from scheduler - case event := <-r.scheduler.next(): - switch event := event.(type) { - case scBlockReceived: - r.processor.send(event) - case scPeerError: - r.processor.send(event) - if err := r.reporter.Report(behavior.BadMessage(event.peerID, "scPeerError")); err != nil { - r.logger.Error("Error reporting peer", "err", err) - } - case scBlockRequest: - peer := r.Switch.Peers().Get(event.peerID) - if peer == nil { - r.logger.Error("Wanted to send block request, but no such peer", "peerID", event.peerID) - continue - } - if err := r.io.sendBlockRequest(peer, event.height); err != nil { - r.logger.Error("Error sending block request", "err", err) - } - case scFinishedEv: - r.processor.send(event) - r.scheduler.stop() - case scSchedulerFail: - if scSchedulerFailErr != event.reason { - r.logger.Error("Scheduler failure", "err", event.reason.Error()) - scSchedulerFailErr = event.reason - } - case scPeersPruned: - // Remove peers from the processor. 
- for _, peerID := range event.peers { - r.processor.send(scPeerError{peerID: peerID, reason: errors.New("peer was pruned")}) - } - r.logger.Debug("Pruned peers", "count", len(event.peers)) - case noOpEvent: - default: - r.logger.Error("Received unexpected scheduler event", "event", fmt.Sprintf("%T", event)) - } - - // Incremental events from processor - case event := <-r.processor.next(): - switch event := event.(type) { - case pcBlockProcessed: - r.setSyncHeight(event.height) - if (r.syncHeight-r.syncStartHeight)%100 == 0 { - newSyncRate := 100 / time.Since(lastHundred).Seconds() - if r.lastSyncRate == 0 { - r.lastSyncRate = newSyncRate - } else { - r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate - } - r.logger.Info("block sync Rate", "height", r.syncHeight, - "max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate) - lastHundred = time.Now() - } - r.scheduler.send(event) - case pcBlockVerificationFailure: - r.scheduler.send(event) - case pcFinished: - r.logger.Info("block sync complete, switching to consensus") - if !r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0 || r.stateSynced) { - r.logger.Error("Failed to switch to consensus reactor") - } - r.endSync() - r.blockSync.UnSet() - return - case noOpEvent: - default: - r.logger.Error("Received unexpected processor event", "event", fmt.Sprintf("%T", event)) - } - - // Terminal event from scheduler - case err := <-r.scheduler.final(): - switch err { - case nil: - r.logger.Info("Scheduler stopped") - default: - r.logger.Error("Scheduler aborted with error", "err", err) - } - - // Terminal event from processor - case err := <-r.processor.final(): - switch err { - case nil: - r.logger.Info("Processor stopped") - default: - r.logger.Error("Processor aborted with error", "err", err) - } - } - } -} - -// Stop implements cmn.Service interface. -func (r *BlockchainReactor) Stop() error { - r.logger.Info("reactor stopping") - r.endSync() - r.logger.Info("reactor stopped") - return nil -} - -// Receive implements Reactor by handling different message types. -// XXX: do not call any methods that can block or incur heavy processing. 
-// https://github.com/tendermint/tendermint/issues/2888 -func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - logger := r.logger.With("src", src.ID(), "chID", chID) - - msgProto := new(bcproto.Message) - - if err := proto.Unmarshal(msgBytes, msgProto); err != nil { - logger.Error("error decoding message", "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - - if err := msgProto.Validate(); err != nil { - logger.Error("peer sent us an invalid msg", "msg", msgProto, "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - - r.logger.Debug("received", "msg", msgProto) - - switch msg := msgProto.Sum.(type) { - case *bcproto.Message_StatusRequest: - if err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), src); err != nil { - logger.Error("Could not send status message to src peer") - } - - case *bcproto.Message_BlockRequest: - block := r.store.LoadBlock(msg.BlockRequest.Height) - if block != nil { - if err := r.io.sendBlockToPeer(block, src); err != nil { - logger.Error("Could not send block message to src peer", "err", err) - } - } else { - logger.Info("peer asking for a block we don't have", "height", msg.BlockRequest.Height) - if err := r.io.sendBlockNotFound(msg.BlockRequest.Height, src); err != nil { - logger.Error("Couldn't send block not found msg", "err", err) - } - } - - case *bcproto.Message_StatusResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcStatusResponse{ - peerID: src.ID(), - base: msg.StatusResponse.Base, - height: msg.StatusResponse.Height, - } - } - r.mtx.RUnlock() - - case *bcproto.Message_BlockResponse: - bi, err := types.BlockFromProto(msg.BlockResponse.Block) - if err != nil { - logger.Error("error transitioning block from protobuf", "err", err) - _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) - return - } - r.mtx.RLock() - if r.events != nil { - r.events <- bcBlockResponse{ - peerID: src.ID(), - block: bi, - size: int64(len(msgBytes)), - time: time.Now(), - } - } - r.mtx.RUnlock() - - case *bcproto.Message_NoBlockResponse: - r.mtx.RLock() - if r.events != nil { - r.events <- bcNoBlockResponse{ - peerID: src.ID(), - height: msg.NoBlockResponse.Height, - time: time.Now(), - } - } - r.mtx.RUnlock() - } -} - -// AddPeer implements Reactor interface -func (r *BlockchainReactor) AddPeer(peer p2p.Peer) { - err := r.io.sendStatusResponse(r.store.Base(), r.store.Height(), peer) - if err != nil { - r.logger.Error("could not send our status to the new peer", "peer", peer.ID, "err", err) - } - - err = r.io.sendStatusRequest(peer) - if err != nil { - r.logger.Error("could not send status request to the new peer", "peer", peer.ID, "err", err) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcAddNewPeer{peerID: peer.ID()} - } -} - -// RemovePeer implements Reactor interface. 
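`Receive` above delivers into `r.events` the same way everywhere: take the read lock, check that a sync is actually in progress (`r.events` non-nil), then send. Distilled, with illustrative types:

```go
package main

import (
	"fmt"
	"sync"
)

type inbox struct {
	mtx    sync.RWMutex
	events chan string
}

// deliver mirrors the guarded sends in the removed Receive handlers: the
// nil check doubles as the "is a sync running?" test.
func (in *inbox) deliver(ev string) bool {
	in.mtx.RLock()
	defer in.mtx.RUnlock()
	if in.events == nil {
		return false // no sync in progress; drop the event
	}
	in.events <- ev
	return true
}

func main() {
	in := &inbox{}
	fmt.Println(in.deliver("statusResponse")) // false: not syncing
	in.events = make(chan string, 1)
	fmt.Println(in.deliver("blockResponse")) // true
}
```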
-func (r *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - r.mtx.RLock() - defer r.mtx.RUnlock() - if r.events != nil { - r.events <- bcRemovePeer{ - peerID: peer.ID(), - reason: reason, - } - } -} - -// GetChannels implements Reactor -func (r *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: BlockchainChannel, - Priority: 5, - SendQueueCapacity: 2000, - RecvBufferCapacity: 1024, - RecvMessageCapacity: bc.MaxMsgSize, - }, - } -} - -func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 { - r.mtx.RLock() - defer r.mtx.RUnlock() - return r.maxPeerHeight -} - -func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration { - if !r.blockSync.IsSet() || r.syncStartTime.IsZero() { - return time.Duration(0) - } - return time.Since(r.syncStartTime) -} - -func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration { - if !r.blockSync.IsSet() { - return time.Duration(0) - } - - r.mtx.RLock() - defer r.mtx.RUnlock() - - targetSyncs := r.maxPeerHeight - r.syncStartHeight - currentSyncs := r.syncHeight - r.syncStartHeight + 1 - if currentSyncs < 0 || r.lastSyncRate < 0.001 { - return time.Duration(0) - } - - remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate - - return time.Duration(int64(remain * float64(time.Second))) -} diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go deleted file mode 100644 index 4120b3942..000000000 --- a/internal/blocksync/v2/reactor_test.go +++ /dev/null @@ -1,532 +0,0 @@ -package v2 - -import ( - "fmt" - "net" - "os" - "sync" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" - - abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - cons "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/mempool/mock" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/internal/test/factory" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" - tmstore "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" -) - -type mockPeer struct { - service.Service - id types.NodeID -} - -func (mp mockPeer) FlushStop() {} -func (mp mockPeer) ID() types.NodeID { return mp.id } -func (mp mockPeer) RemoteIP() net.IP { return net.IP{} } -func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} } - -func (mp mockPeer) IsOutbound() bool { return true } -func (mp mockPeer) IsPersistent() bool { return true } -func (mp mockPeer) CloseConn() error { return nil } - -func (mp mockPeer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ - NodeID: "", - ListenAddr: "", - } -} -func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp mockPeer) SocketAddr() *p2p.NetAddress { return &p2p.NetAddress{} } - -func (mp mockPeer) Send(byte, []byte) bool { return true } -func (mp mockPeer) TrySend(byte, []byte) bool { return true } - -func (mp mockPeer) 
Set(string, interface{}) {} -func (mp mockPeer) Get(string) interface{} { return struct{}{} } - -//nolint:unused -type mockBlockStore struct { - blocks map[int64]*types.Block -} - -//nolint:unused -func (ml *mockBlockStore) Height() int64 { - return int64(len(ml.blocks)) -} - -//nolint:unused -func (ml *mockBlockStore) LoadBlock(height int64) *types.Block { - return ml.blocks[height] -} - -//nolint:unused -func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) { - ml.blocks[block.Height] = block -} - -type mockBlockApplier struct { -} - -// XXX: Add whitelist/blacklist? -func (mba *mockBlockApplier) ApplyBlock( - state sm.State, blockID types.BlockID, block *types.Block, -) (sm.State, error) { - state.LastBlockHeight++ - return state, nil -} - -type mockSwitchIo struct { - mtx sync.Mutex - switchedToConsensus bool - numStatusResponse int - numBlockResponse int - numNoBlockResponse int - numStatusRequest int -} - -var _ iIO = (*mockSwitchIo)(nil) - -func (sio *mockSwitchIo) sendBlockRequest(_ p2p.Peer, _ int64) error { - return nil -} - -func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) sendBlockNotFound(_ int64, _ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numNoBlockResponse++ - return nil -} - -func (sio *mockSwitchIo) trySwitchToConsensus(_ sm.State, _ bool) bool { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.switchedToConsensus = true - return true -} - -func (sio *mockSwitchIo) broadcastStatusRequest() error { - return nil -} - -func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error { - sio.mtx.Lock() - defer sio.mtx.Unlock() - sio.numStatusRequest++ - return nil -} - -type testReactorParams struct { - logger log.Logger - genDoc *types.GenesisDoc - privVals []types.PrivValidator - startHeight int64 - mockA bool -} - -func newTestReactor(t *testing.T, p testReactorParams) *BlockchainReactor { - store, state, _ := newReactorStore(t, p.genDoc, p.privVals, p.startHeight) - reporter := behavior.NewMockReporter() - - var appl blockApplier - - if p.mockA { - appl = &mockBlockApplier{} - } else { - app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() - require.NoError(t, err) - db := dbm.NewMemDB() - stateStore := sm.NewStore(db) - blockStore := tmstore.NewBlockStore(dbm.NewMemDB()) - appl = sm.NewBlockExecutor( - stateStore, p.logger, proxyApp.Consensus(), mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - err = stateStore.Save(state) - require.NoError(t, err) - } - - r := newReactor(state, store, reporter, appl, true, cons.NopMetrics()) - logger := log.TestingLogger() - r.SetLogger(logger.With("module", "blockchain")) - - return r -} - -// This test is left here and not deleted to retain the termination cases for -// future improvement in [#4482](https://github.com/tendermint/tendermint/issues/4482). 
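`mockSwitchIo` above is a plain counting double: every fake IO call bumps a counter under a mutex, and the tests assert on before/after deltas (`old+1`). The pattern in isolation, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

// countingIO records how many times each IO path was exercised, the same
// style as mockSwitchIo's numStatusResponse et al. (illustrative only).
type countingIO struct {
	mtx               sync.Mutex
	numStatusResponse int
}

func (c *countingIO) sendStatusResponse() error {
	c.mtx.Lock()
	defer c.mtx.Unlock()
	c.numStatusResponse++
	return nil
}

func main() {
	c := &countingIO{}
	old := c.numStatusResponse
	_ = c.sendStatusResponse()
	fmt.Println(c.numStatusResponse == old+1) // true
}
```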
-// func TestReactorTerminationScenarios(t *testing.T) { - -// config := cfg.ResetTestRoot("blockchain_reactor_v2_test") -// defer os.RemoveAll(config.RootDir) -// genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) -// refStore, _, _ := newReactorStore(genDoc, privVals, 20) - -// params := testReactorParams{ -// logger: log.TestingLogger(), -// genDoc: genDoc, -// privVals: privVals, -// startHeight: 10, -// bufferSize: 100, -// mockA: true, -// } - -// type testEvent struct { -// evType string -// peer string -// height int64 -// } - -// tests := []struct { -// name string -// params testReactorParams -// msgs []testEvent -// }{ -// { -// name: "simple termination on max peer height - one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 12}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "simple termination on max peer height - two peers", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 11}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P1", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, noBlock error", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveNB", peer: "P1", height: 11}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// { -// name: "termination on max peer height - two peers, remove one peer", -// params: params, -// msgs: []testEvent{ -// {evType: "AddPeer", peer: "P1"}, -// {evType: "AddPeer", peer: "P2"}, -// {evType: "ReceiveS", peer: "P1", height: 13}, -// {evType: "ReceiveS", peer: "P2", height: 15}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "RemovePeer", peer: "P1"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 12}, -// {evType: "ReceiveB", peer: "P2", height: 11}, -// {evType: "Process"}, -// {evType: "BlockReq"}, -// {evType: "BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 13}, -// {evType: "Process"}, -// {evType: "ReceiveB", peer: "P2", height: 14}, -// {evType: "Process"}, -// {evType: 
"BlockReq"}, -// {evType: "ReceiveB", peer: "P2", height: 15}, -// {evType: "Process"}, -// }, -// }, -// } - -// for _, tt := range tests { -// tt := tt -// t.Run(tt.name, func(t *testing.T) { -// reactor := newTestReactor(params) -// reactor.Start() -// reactor.reporter = behavior.NewMockReporter() -// mockSwitch := &mockSwitchIo{switchedToConsensus: false} -// reactor.io = mockSwitch -// // time for go routines to start -// time.Sleep(time.Millisecond) - -// for _, step := range tt.msgs { -// switch step.evType { -// case "AddPeer": -// reactor.scheduler.send(bcAddNewPeer{peerID: p2p.ID(step.peer)}) -// case "RemovePeer": -// reactor.scheduler.send(bcRemovePeer{peerID: p2p.ID(step.peer)}) -// case "ReceiveS": -// reactor.scheduler.send(bcStatusResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "ReceiveB": -// reactor.scheduler.send(bcBlockResponse{ -// peerID: p2p.ID(step.peer), -// block: refStore.LoadBlock(step.height), -// size: 10, -// time: time.Now(), -// }) -// case "ReceiveNB": -// reactor.scheduler.send(bcNoBlockResponse{ -// peerID: p2p.ID(step.peer), -// height: step.height, -// time: time.Now(), -// }) -// case "BlockReq": -// reactor.scheduler.send(rTrySchedule{time: time.Now()}) -// case "Process": -// reactor.processor.send(rProcessBlock{}) -// } -// // give time for messages to propagate between routines -// time.Sleep(time.Millisecond) -// } - -// // time for processor to finish and reactor to switch to consensus -// time.Sleep(20 * time.Millisecond) -// assert.True(t, mockSwitch.hasSwitchedToConsensus()) -// reactor.Stop() -// }) -// } -// } - -func TestReactorHelperMode(t *testing.T) { - var ( - channelID = byte(0x40) - ) - - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) - - params := testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - startHeight: 20, - mockA: true, - } - - type testEvent struct { - peer string - event interface{} - } - - tests := []struct { - name string - params testReactorParams - msgs []testEvent - }{ - { - name: "status request", - params: params, - msgs: []testEvent{ - {"P1", bcproto.StatusRequest{}}, - {"P1", bcproto.BlockRequest{Height: 13}}, - {"P1", bcproto.BlockRequest{Height: 20}}, - {"P1", bcproto.BlockRequest{Height: 22}}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - reactor := newTestReactor(t, params) - mockSwitch := &mockSwitchIo{switchedToConsensus: false} - reactor.io = mockSwitch - err := reactor.Start() - require.NoError(t, err) - - for i := 0; i < len(tt.msgs); i++ { - step := tt.msgs[i] - switch ev := step.event.(type) { - case bcproto.StatusRequest: - old := mockSwitch.numStatusResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numStatusResponse) - case bcproto.BlockRequest: - if ev.Height > params.startHeight { - old := mockSwitch.numNoBlockResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numNoBlockResponse) - } else { - old := 
mockSwitch.numBlockResponse - - msgProto := new(bcproto.Message) - require.NoError(t, msgProto.Wrap(&ev)) - - msgBz, err := proto.Marshal(msgProto) - require.NoError(t, err) - - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) - assert.Equal(t, old+1, mockSwitch.numBlockResponse) - } - } - } - err = reactor.Stop() - require.NoError(t, err) - }) - } -} - -func TestReactorSetSwitchNil(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") - defer os.RemoveAll(config.RootDir) - genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30) - - reactor := newTestReactor(t, testReactorParams{ - logger: log.TestingLogger(), - genDoc: genDoc, - privVals: privVals, - }) - reactor.SetSwitch(nil) - - assert.Nil(t, reactor.Switch) - assert.Nil(t, reactor.io) -} - -type testApp struct { - abci.BaseApplication -} - -func newReactorStore( - t *testing.T, - genDoc *types.GenesisDoc, - privVals []types.PrivValidator, - maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) { - t.Helper() - - require.Len(t, privVals, 1) - app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) - err := proxyApp.Start() - if err != nil { - panic(fmt.Errorf("error start app: %w", err)) - } - - stateDB := dbm.NewMemDB() - blockStore := tmstore.NewBlockStore(dbm.NewMemDB()) - stateStore := sm.NewStore(stateDB) - state, err := sm.MakeGenesisState(genDoc) - require.NoError(t, err) - - blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), - mock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - err = stateStore.Save(state) - require.NoError(t, err) - - // add blocks in - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil) - if blockHeight > 1 { - lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) - lastBlock := blockStore.LoadBlock(blockHeight - 1) - vote, err := factory.MakeVote( - privVals[0], - lastBlock.Header.ChainID, 0, - lastBlock.Header.Height, 0, 2, - lastBlockMeta.BlockID, - time.Now(), - ) - require.NoError(t, err) - lastCommit = types.NewCommit(vote.Height, vote.Round, - lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) - } - - thisBlock := sf.MakeBlock(state, blockHeight, lastCommit) - - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} - - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) - require.NoError(t, err) - - blockStore.SaveBlock(thisBlock, thisParts, lastCommit) - } - return blockStore, state, blockExec -} diff --git a/internal/blocksync/v2/routine.go b/internal/blocksync/v2/routine.go deleted file mode 100644 index e4ca52add..000000000 --- a/internal/blocksync/v2/routine.go +++ /dev/null @@ -1,166 +0,0 @@ -package v2 - -import ( - "fmt" - "strings" - "sync/atomic" - - "github.com/Workiva/go-datastructures/queue" - - "github.com/tendermint/tendermint/libs/log" -) - -type handleFunc = func(event Event) (Event, error) - -const historySize = 25 - -// Routine is a structure that models a finite state machine as serialized -// stream of events processed by a handle function. This Routine structure -// handles the concurrency and messaging guarantees. Events are sent via -// `send` are handled by the `handle` function to produce an iterator -// `next()`. 
Calling `stop()` on a routine will conclude processing of all -// sent events and produce `final()` event representing the terminal state. -type Routine struct { - name string - handle handleFunc - queue *queue.PriorityQueue - history []Event - out chan Event - fin chan error - rdy chan struct{} - running *uint32 - logger log.Logger - metrics *Metrics -} - -func newRoutine(name string, handleFunc handleFunc, bufferSize int) *Routine { - return &Routine{ - name: name, - handle: handleFunc, - queue: queue.NewPriorityQueue(bufferSize, true), - history: make([]Event, 0, historySize), - out: make(chan Event, bufferSize), - rdy: make(chan struct{}, 1), - fin: make(chan error, 1), - running: new(uint32), - logger: log.NewNopLogger(), - metrics: NopMetrics(), - } -} - -func (rt *Routine) setLogger(logger log.Logger) { - rt.logger = logger -} - -// nolint:unused -func (rt *Routine) setMetrics(metrics *Metrics) { - rt.metrics = metrics -} - -func (rt *Routine) start() { - rt.logger.Info(fmt.Sprintf("%s: run", rt.name)) - running := atomic.CompareAndSwapUint32(rt.running, uint32(0), uint32(1)) - if !running { - panic(fmt.Sprintf("%s is already running", rt.name)) - } - close(rt.rdy) - defer func() { - if r := recover(); r != nil { - var ( - b strings.Builder - j int - ) - for i := len(rt.history) - 1; i >= 0; i-- { - fmt.Fprintf(&b, "%d: %+v\n", j, rt.history[i]) - j++ - } - panic(fmt.Sprintf("%v\nlast events:\n%v", r, b.String())) - } - stopped := atomic.CompareAndSwapUint32(rt.running, uint32(1), uint32(0)) - if !stopped { - panic(fmt.Sprintf("%s is failed to stop", rt.name)) - } - }() - - for { - events, err := rt.queue.Get(1) - if err == queue.ErrDisposed { - rt.terminate(nil) - return - } else if err != nil { - rt.terminate(err) - return - } - oEvent, err := rt.handle(events[0].(Event)) - rt.metrics.EventsHandled.With("routine", rt.name).Add(1) - if err != nil { - rt.terminate(err) - return - } - rt.metrics.EventsOut.With("routine", rt.name).Add(1) - rt.logger.Debug(fmt.Sprintf("%s: produced %T %+v", rt.name, oEvent, oEvent)) - - // Skip rTrySchedule and rProcessBlock events as they clutter the history - // due to their frequency. - switch events[0].(type) { - case rTrySchedule: - case rProcessBlock: - default: - rt.history = append(rt.history, events[0].(Event)) - if len(rt.history) > historySize { - rt.history = rt.history[1:] - } - } - - rt.out <- oEvent - } -} - -// XXX: look into returning OpError in the net package -func (rt *Routine) send(event Event) bool { - rt.logger.Debug(fmt.Sprintf("%s: received %T %+v", rt.name, event, event)) - if !rt.isRunning() { - return false - } - err := rt.queue.Put(event) - if err != nil { - rt.metrics.EventsShed.With("routine", rt.name).Add(1) - rt.logger.Error(fmt.Sprintf("%s: send failed, queue was full/stopped", rt.name)) - return false - } - - rt.metrics.EventsSent.With("routine", rt.name).Add(1) - return true -} - -func (rt *Routine) isRunning() bool { - return atomic.LoadUint32(rt.running) == 1 -} - -func (rt *Routine) next() chan Event { - return rt.out -} - -func (rt *Routine) ready() chan struct{} { - return rt.rdy -} - -func (rt *Routine) stop() { - if !rt.isRunning() { // XXX: this should check rt.queue.Disposed() - return - } - - rt.logger.Info(fmt.Sprintf("%s: stop", rt.name)) - rt.queue.Dispose() // this should block until all queue items are free? 
-} - -func (rt *Routine) final() chan error { - return rt.fin -} - -// XXX: Maybe get rid of this -func (rt *Routine) terminate(reason error) { - // We don't close the rt.out channel here, to avoid spinning on the closed channel - // in the event loop. - rt.fin <- reason -} diff --git a/internal/blocksync/v2/routine_test.go b/internal/blocksync/v2/routine_test.go deleted file mode 100644 index 8f92bee3e..000000000 --- a/internal/blocksync/v2/routine_test.go +++ /dev/null @@ -1,163 +0,0 @@ -package v2 - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -type eventA struct { - priorityNormal -} - -var errDone = fmt.Errorf("done") - -func simpleHandler(event Event) (Event, error) { - if _, ok := event.(eventA); ok { - return noOp, errDone - } - return noOp, nil -} - -func TestRoutineFinal(t *testing.T) { - var ( - bufferSize = 10 - routine = newRoutine("simpleRoutine", simpleHandler, bufferSize) - ) - - assert.False(t, routine.isRunning(), - "expected an initialized routine to not be running") - go routine.start() - <-routine.ready() - assert.True(t, routine.isRunning(), - "expected an started routine") - - assert.True(t, routine.send(eventA{}), - "expected sending to a ready routine to succeed") - - assert.Equal(t, errDone, <-routine.final(), - "expected the final event to be done") - - assert.False(t, routine.isRunning(), - "expected an completed routine to no longer be running") -} - -func TestRoutineStop(t *testing.T) { - var ( - bufferSize = 10 - routine = newRoutine("simpleRoutine", simpleHandler, bufferSize) - ) - - assert.False(t, routine.send(eventA{}), - "expected sending to an unstarted routine to fail") - - go routine.start() - <-routine.ready() - - assert.True(t, routine.send(eventA{}), - "expected sending to a running routine to succeed") - - routine.stop() - - assert.False(t, routine.send(eventA{}), - "expected sending to a stopped routine to fail") -} - -type finalCount struct { - count int -} - -func (f finalCount) Error() string { - return "end" -} - -func genStatefulHandler(maxCount int) handleFunc { - counter := 0 - return func(event Event) (Event, error) { - if _, ok := event.(eventA); ok { - counter++ - if counter >= maxCount { - return noOp, finalCount{counter} - } - - return eventA{}, nil - } - return noOp, nil - } -} - -func feedback(r *Routine) { - for event := range r.next() { - r.send(event) - } -} - -func TestStatefulRoutine(t *testing.T) { - var ( - count = 10 - handler = genStatefulHandler(count) - bufferSize = 20 - routine = newRoutine("statefulRoutine", handler, bufferSize) - ) - - go routine.start() - go feedback(routine) - <-routine.ready() - - assert.True(t, routine.send(eventA{}), - "expected sending to a started routine to succeed") - - final := <-routine.final() - if fnl, ok := final.(finalCount); ok { - assert.Equal(t, count, fnl.count, - "expected the routine to count to 10") - } else { - t.Fail() - } -} - -type lowPriorityEvent struct { - priorityLow -} - -type highPriorityEvent struct { - priorityHigh -} - -func handleWithPriority(event Event) (Event, error) { - switch event.(type) { - case lowPriorityEvent: - return noOp, nil - case highPriorityEvent: - return noOp, errDone - } - return noOp, nil -} - -func TestPriority(t *testing.T) { - var ( - bufferSize = 20 - routine = newRoutine("priorityRoutine", handleWithPriority, bufferSize) - ) - - go routine.start() - <-routine.ready() - go func() { - for { - routine.send(lowPriorityEvent{}) - time.Sleep(1 * time.Millisecond) - } - }() - time.Sleep(10 * time.Millisecond) 
- - assert.True(t, routine.isRunning(), - "expected an started routine") - assert.True(t, routine.send(highPriorityEvent{}), - "expected send to succeed even when saturated") - - assert.Equal(t, errDone, <-routine.final()) - assert.False(t, routine.isRunning(), - "expected an started routine") -} diff --git a/internal/blocksync/v2/scheduler.go b/internal/blocksync/v2/scheduler.go deleted file mode 100644 index b731d96a4..000000000 --- a/internal/blocksync/v2/scheduler.go +++ /dev/null @@ -1,711 +0,0 @@ -package v2 - -import ( - "bytes" - "errors" - "fmt" - "math" - "sort" - "time" - - "github.com/tendermint/tendermint/types" -) - -// Events generated by the scheduler: -// all blocks have been processed -type scFinishedEv struct { - priorityNormal - reason string -} - -func (e scFinishedEv) String() string { - return fmt.Sprintf("scFinishedEv{%v}", e.reason) -} - -// send a blockRequest message -type scBlockRequest struct { - priorityNormal - peerID types.NodeID - height int64 -} - -func (e scBlockRequest) String() string { - return fmt.Sprintf("scBlockRequest{%d from %v}", e.height, e.peerID) -} - -// a block has been received and validated by the scheduler -type scBlockReceived struct { - priorityNormal - peerID types.NodeID - block *types.Block -} - -func (e scBlockReceived) String() string { - return fmt.Sprintf("scBlockReceived{%d#%X from %v}", e.block.Height, e.block.Hash(), e.peerID) -} - -// scheduler detected a peer error -type scPeerError struct { - priorityHigh - peerID types.NodeID - reason error -} - -func (e scPeerError) String() string { - return fmt.Sprintf("scPeerError{%v errored with %v}", e.peerID, e.reason) -} - -// scheduler removed a set of peers (timed out or slow peer) -type scPeersPruned struct { - priorityHigh - peers []types.NodeID -} - -func (e scPeersPruned) String() string { - return fmt.Sprintf("scPeersPruned{%v}", e.peers) -} - -// XXX: make this fatal? 
-// scheduler encountered a fatal error -type scSchedulerFail struct { - priorityHigh - reason error -} - -func (e scSchedulerFail) String() string { - return fmt.Sprintf("scSchedulerFail{%v}", e.reason) -} - -type blockState int - -const ( - blockStateUnknown blockState = iota + 1 // no known peer has this block - blockStateNew // indicates that a peer has reported having this block - blockStatePending // indicates that this block has been requested from a peer - blockStateReceived // indicates that this block has been received by a peer - blockStateProcessed // indicates that this block has been applied -) - -func (e blockState) String() string { - switch e { - case blockStateUnknown: - return "Unknown" - case blockStateNew: - return "New" - case blockStatePending: - return "Pending" - case blockStateReceived: - return "Received" - case blockStateProcessed: - return "Processed" - default: - return fmt.Sprintf("invalid blockState: %d", e) - } -} - -type peerState int - -const ( - peerStateNew = iota + 1 - peerStateReady - peerStateRemoved -) - -func (e peerState) String() string { - switch e { - case peerStateNew: - return "New" - case peerStateReady: - return "Ready" - case peerStateRemoved: - return "Removed" - default: - panic(fmt.Sprintf("unknown peerState: %d", e)) - } -} - -type scPeer struct { - peerID types.NodeID - - // initialized as New when peer is added, updated to Ready when statusUpdate is received, - // updated to Removed when peer is removed - state peerState - - base int64 // updated when statusResponse is received - height int64 // updated when statusResponse is received - lastTouched time.Time - lastRate int64 // last receive rate in bytes -} - -func (p scPeer) String() string { - return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}", - p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID) -} - -func newScPeer(peerID types.NodeID) *scPeer { - return &scPeer{ - peerID: peerID, - state: peerStateNew, - base: -1, - height: -1, - lastTouched: time.Time{}, - } -} - -// The scheduler keep track of the state of each block and each peer. The -// scheduler will attempt to schedule new block requests with `trySchedule` -// events and remove slow peers with `tryPrune` events. -type scheduler struct { - initHeight int64 - - // next block that needs to be processed. All blocks with smaller height are - // in Processed state. - height int64 - - // lastAdvance tracks the last time a block execution happened. - // syncTimeout is the maximum time the scheduler waits to advance in the block sync process before finishing. - // This covers the cases where there are no peers or all peers have a lower height. - lastAdvance time.Time - syncTimeout time.Duration - - // a map of peerID to scheduler specific peer struct `scPeer` used to keep - // track of peer specific state - peers map[types.NodeID]*scPeer - peerTimeout time.Duration // maximum response time from a peer otherwise prune - minRecvRate int64 // minimum receive rate from peer otherwise prune - - // the maximum number of blocks that should be New, Received or Pending at any point - // in time. This is used to enforce a limit on the blockStates map. - targetPending int - // a list of blocks to be scheduled (New), Pending or Received. Its length should be - // smaller than targetPending. 
- blockStates map[int64]blockState - - // a map of heights to the peer we are waiting a response from - pendingBlocks map[int64]types.NodeID - - // the time at which a block was put in blockStatePending - pendingTime map[int64]time.Time - - // a map of heights to the peers that put the block in blockStateReceived - receivedBlocks map[int64]types.NodeID -} - -func (sc scheduler) String() string { - return fmt.Sprintf("ih: %d, bst: %v, peers: %v, pblks: %v, ptm %v, rblks: %v", - sc.initHeight, sc.blockStates, sc.peers, sc.pendingBlocks, sc.pendingTime, sc.receivedBlocks) -} - -func newScheduler(initHeight int64, startTime time.Time) *scheduler { - sc := scheduler{ - initHeight: initHeight, - lastAdvance: startTime, - syncTimeout: 60 * time.Second, - height: initHeight, - blockStates: make(map[int64]blockState), - peers: make(map[types.NodeID]*scPeer), - pendingBlocks: make(map[int64]types.NodeID), - pendingTime: make(map[int64]time.Time), - receivedBlocks: make(map[int64]types.NodeID), - targetPending: 10, // TODO - pass as param - peerTimeout: 15 * time.Second, // TODO - pass as param - minRecvRate: 0, // int64(7680), TODO - pass as param - } - - return &sc -} - -func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer { - if _, ok := sc.peers[peerID]; !ok { - sc.peers[peerID] = newScPeer(peerID) - } - return sc.peers[peerID] -} - -func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error { - peer, ok := sc.peers[peerID] - if !ok { - return fmt.Errorf("couldn't find peer %s", peerID) - } - - if peer.state != peerStateReady { - return fmt.Errorf("tried to touch peer in state %s, must be Ready", peer.state) - } - - peer.lastTouched = time - - return nil -} - -func (sc *scheduler) removePeer(peerID types.NodeID) { - peer, ok := sc.peers[peerID] - if !ok { - return - } - if peer.state == peerStateRemoved { - return - } - - for height, pendingPeerID := range sc.pendingBlocks { - if pendingPeerID == peerID { - sc.setStateAtHeight(height, blockStateNew) - delete(sc.pendingTime, height) - delete(sc.pendingBlocks, height) - } - } - - for height, rcvPeerID := range sc.receivedBlocks { - if rcvPeerID == peerID { - sc.setStateAtHeight(height, blockStateNew) - delete(sc.receivedBlocks, height) - } - } - - // remove the blocks from blockStates if the peer removal causes the max peer height to be lower. - peer.state = peerStateRemoved - maxPeerHeight := int64(0) - for _, otherPeer := range sc.peers { - if otherPeer.state != peerStateReady { - continue - } - if otherPeer.peerID != peer.peerID && otherPeer.height > maxPeerHeight { - maxPeerHeight = otherPeer.height - } - } - for h := range sc.blockStates { - if h > maxPeerHeight { - delete(sc.blockStates, h) - } - } -} - -// check if the blockPool is running low and add new blocks in New state to be requested. -// This function is called when there is an increase in the maximum peer height or when -// blocks are processed. -func (sc *scheduler) addNewBlocks() { - if len(sc.blockStates) >= sc.targetPending { - return - } - - for i := sc.height; i < int64(sc.targetPending)+sc.height; i++ { - if i > sc.maxHeight() { - break - } - if sc.getStateAtHeight(i) == blockStateUnknown { - sc.setStateAtHeight(i, blockStateNew) - } - } -} - -func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error { - peer := sc.ensurePeer(peerID) - - if peer.state == peerStateRemoved { - return nil // noop - } - - if height < peer.height { - sc.removePeer(peerID) - return fmt.Errorf("cannot move peer height lower. 
-	}
-
-	if base > height {
-		sc.removePeer(peerID)
-		return fmt.Errorf("cannot set peer base higher than its height")
-	}
-
-	peer.base = base
-	peer.height = height
-	peer.state = peerStateReady
-
-	sc.addNewBlocks()
-	return nil
-}
-
-func (sc *scheduler) getStateAtHeight(height int64) blockState {
-	if height < sc.height {
-		return blockStateProcessed
-	} else if state, ok := sc.blockStates[height]; ok {
-		return state
-	} else {
-		return blockStateUnknown
-	}
-}
-
-func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
-	peers := make([]types.NodeID, 0)
-	for _, peer := range sc.peers {
-		if peer.state != peerStateReady {
-			continue
-		}
-		if peer.base <= height && peer.height >= height {
-			peers = append(peers, peer.peerID)
-		}
-	}
-	return peers
-}
-
-func (sc *scheduler) prunablePeers(peerTimeout time.Duration, minRecvRate int64, now time.Time) []types.NodeID {
-	prunable := make([]types.NodeID, 0)
-	for peerID, peer := range sc.peers {
-		if peer.state != peerStateReady {
-			continue
-		}
-		if now.Sub(peer.lastTouched) > peerTimeout || peer.lastRate < minRecvRate {
-			prunable = append(prunable, peerID)
-		}
-	}
-	// Tests for handleTryPrunePeer() may fail without sort due to range non-determinism
-	sort.Sort(PeerByID(prunable))
-	return prunable
-}
-
-func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
-	sc.blockStates[height] = state
-}
-
-// CONTRACT: peer exists and is in Ready state.
-func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error {
-	peer := sc.peers[peerID]
-
-	if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
-		return fmt.Errorf("received block %d from peer %s without being requested", height, peerID)
-	}
-
-	pendingTime, ok := sc.pendingTime[height]
-	if !ok || now.Sub(pendingTime) <= 0 {
-		return fmt.Errorf("clock error: block %d received at %s but requested at %s",
-			height, now, pendingTime)
-	}
-
-	peer.lastRate = size / now.Sub(pendingTime).Nanoseconds()
-
-	sc.setStateAtHeight(height, blockStateReceived)
-	delete(sc.pendingBlocks, height)
-	delete(sc.pendingTime, height)
-
-	sc.receivedBlocks[height] = peerID
-
-	return nil
-}
-
-func (sc *scheduler) markPending(peerID types.NodeID, height int64, tm time.Time) error {
-	state := sc.getStateAtHeight(height)
-	if state != blockStateNew {
-		return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
-	}
-
-	peer, ok := sc.peers[peerID]
-	if !ok {
-		return fmt.Errorf("cannot find peer %s", peerID)
-	}
-
-	if peer.state != peerStateReady {
-		return fmt.Errorf("cannot schedule height %d from peer %s in state %s", height, peerID, peer.state)
-	}
-
-	if height > peer.height {
-		return fmt.Errorf("cannot request height %d from peer %s that is at height %d",
-			height, peerID, peer.height)
-	}
-
-	if height < peer.base {
-		return fmt.Errorf("cannot request height %d for peer %s with base %d",
-			height, peerID, peer.base)
-	}
-
-	sc.setStateAtHeight(height, blockStatePending)
-	sc.pendingBlocks[height] = peerID
-	sc.pendingTime[height] = tm
-
-	return nil
-}
-
-func (sc *scheduler) markProcessed(height int64) error {
-	// It is possible that a peer error or timeout is handled after the processor
-	// has processed the block but before the scheduler received this event, so
-	// when the pcBlockProcessed event is received the block may have been requested
-	// again => don't check the block state.
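
A note on the ordering this comment allows: markPending, then markReceived,
then the processor applies the block, then a peer error re-requests the same
height, and only afterwards does the pcBlockProcessed event reach the
scheduler. Advancing sc.height and deleting every map entry for the height,
as below, is correct in either ordering.
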
-	sc.lastAdvance = time.Now()
-	sc.height = height + 1
-	delete(sc.pendingBlocks, height)
-	delete(sc.pendingTime, height)
-	delete(sc.receivedBlocks, height)
-	delete(sc.blockStates, height)
-	sc.addNewBlocks()
-	return nil
-}
-
-func (sc *scheduler) allBlocksProcessed() bool {
-	if len(sc.peers) == 0 {
-		return false
-	}
-	return sc.height >= sc.maxHeight()
-}
-
-// returns the max height of any Ready peer, or the last processed block
-// (i.e. sc.height - 1) if no Ready peer is higher
-func (sc *scheduler) maxHeight() int64 {
-	max := sc.height - 1
-	for _, peer := range sc.peers {
-		if peer.state != peerStateReady {
-			continue
-		}
-		if max < peer.height {
-			max = peer.height
-		}
-	}
-	return max
-}
-
-// lowest block in sc.blockStates with state == blockStateNew or -1 if no new blocks
-func (sc *scheduler) nextHeightToSchedule() int64 {
-	var min int64 = math.MaxInt64
-	for height, state := range sc.blockStates {
-		if state == blockStateNew && height < min {
-			min = height
-		}
-	}
-	if min == math.MaxInt64 {
-		min = -1
-	}
-	return min
-}
-
-func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
-	var heights []int64
-	for height, pendingPeerID := range sc.pendingBlocks {
-		if pendingPeerID == peerID {
-			heights = append(heights, height)
-		}
-	}
-	return heights
-}
-
-func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
-	peers := sc.getPeersWithHeight(height)
-	if len(peers) == 0 {
-		return "", fmt.Errorf("cannot find peer for height %d", height)
-	}
-
-	// create a map from number of pending requests to a list
-	// of peers having that number of pending requests.
-	pendingFrom := make(map[int][]types.NodeID)
-	for _, peerID := range peers {
-		numPending := len(sc.pendingFrom(peerID))
-		pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
-	}
-
-	// find the set of peers with minimum number of pending requests.
-	var minPending int64 = math.MaxInt64
-	for mp := range pendingFrom {
-		if int64(mp) < minPending {
-			minPending = int64(mp)
-		}
-	}
-
-	sort.Sort(PeerByID(pendingFrom[int(minPending)]))
-	return pendingFrom[int(minPending)][0], nil
-}
-
-// PeerByID is a list of peers sorted by peerID.
-type PeerByID []types.NodeID
-
-func (peers PeerByID) Len() int {
-	return len(peers)
-}
-func (peers PeerByID) Less(i, j int) bool {
-	return bytes.Compare([]byte(peers[i]), []byte(peers[j])) == -1
-}
-
-func (peers PeerByID) Swap(i, j int) {
-	peers[i], peers[j] = peers[j], peers[i]
-}
-
-// Handlers
-
-// This handler gets the block, performs some validation and then passes it on to the processor.
-func (sc *scheduler) handleBlockResponse(event bcBlockResponse) (Event, error) {
-	err := sc.touchPeer(event.peerID, event.time)
-	if err != nil {
-		// peer does not exist OR not ready
-		return noOp, nil
-	}
-
-	err = sc.markReceived(event.peerID, event.block.Height, event.size, event.time)
-	if err != nil {
-		sc.removePeer(event.peerID)
-		return scPeerError{peerID: event.peerID, reason: err}, nil
-	}
-
-	return scBlockReceived{peerID: event.peerID, block: event.block}, nil
-}
-
-func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, error) {
-	// No such peer or peer was removed.
-	peer, ok := sc.peers[event.peerID]
-	if !ok || peer.state == peerStateRemoved {
-		return noOp, nil
-	}
-
-	// The peer may have just been removed due to errors, low speed or timeouts.
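
Worth noting: removePeer is idempotent (it returns early for unknown and
already-Removed peers, see above), so the unconditional call below stays safe
even when a racing removal has already happened.
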
-	sc.removePeer(event.peerID)
-
-	return scPeerError{peerID: event.peerID,
-		reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d",
-			event.peerID, peer.base, peer.height, event.height)}, nil
-}
-
-func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) {
-	if event.height != sc.height {
-		panic(fmt.Sprintf("processed height %d, but expected height %d", event.height, sc.height))
-	}
-
-	err := sc.markProcessed(event.height)
-	if err != nil {
-		return scSchedulerFail{reason: err}, nil
-	}
-
-	if sc.allBlocksProcessed() {
-		return scFinishedEv{reason: "processed all blocks"}, nil
-	}
-
-	return noOp, nil
-}
-
-// Handles an error from the processor. The processor has already cleaned the blocks from
-// the peers included in this event. Just attempt to remove the peers.
-func (sc *scheduler) handleBlockProcessError(event pcBlockVerificationFailure) (Event, error) {
-	// The peers may have just been removed due to errors, low speed or timeouts.
-	sc.removePeer(event.firstPeerID)
-	if event.firstPeerID != event.secondPeerID {
-		sc.removePeer(event.secondPeerID)
-	}
-
-	if sc.allBlocksProcessed() {
-		return scFinishedEv{reason: "error on last block"}, nil
-	}
-
-	return noOp, nil
-}
-
-func (sc *scheduler) handleAddNewPeer(event bcAddNewPeer) (Event, error) {
-	sc.ensurePeer(event.peerID)
-	return noOp, nil
-}
-
-func (sc *scheduler) handleRemovePeer(event bcRemovePeer) (Event, error) {
-	sc.removePeer(event.peerID)
-
-	if sc.allBlocksProcessed() {
-		return scFinishedEv{reason: "removed peer"}, nil
-	}
-
-	// Return scPeerError so the peer (and all associated blocks) is removed from
-	// the processor.
-	return scPeerError{peerID: event.peerID, reason: errors.New("peer was stopped")}, nil
-}
-
-func (sc *scheduler) handleTryPrunePeer(event rTryPrunePeer) (Event, error) {
-	// Check the behavior of the peer responsible for delivering the block at sc.height.
-	timeHeightAsked, ok := sc.pendingTime[sc.height]
-	if ok && time.Since(timeHeightAsked) > sc.peerTimeout {
-		// A request was sent to a peer for the block at sc.height but a response was not received
-		// from that peer within sc.peerTimeout. Remove the peer. This ensures that a peer
-		// will be timed out even if it sends blocks at higher heights but prevents progress by
-		// not sending the block at the current height.
-		sc.removePeer(sc.pendingBlocks[sc.height])
-	}
-
-	prunablePeers := sc.prunablePeers(sc.peerTimeout, sc.minRecvRate, event.time)
-	if len(prunablePeers) == 0 {
-		return noOp, nil
-	}
-	for _, peerID := range prunablePeers {
-		sc.removePeer(peerID)
-	}
-
-	// If all blocks are processed we should finish.
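
Pruning can legitimately end the sync: removePeer drops block states above
the max height of the remaining Ready peers, so when the pruned peers were
the only ones ahead of us, the allBlocksProcessed check below reports true.
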
-	if sc.allBlocksProcessed() {
-		return scFinishedEv{reason: "after try prune"}, nil
-	}
-
-	return scPeersPruned{peers: prunablePeers}, nil
-}
-
-func (sc *scheduler) handleResetState(event bcResetState) (Event, error) {
-	initHeight := event.state.LastBlockHeight + 1
-	if initHeight == 1 {
-		initHeight = event.state.InitialHeight
-	}
-	sc.initHeight = initHeight
-	sc.height = initHeight
-	sc.lastAdvance = time.Now()
-	sc.addNewBlocks()
-	return noOp, nil
-}
-
-func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) {
-	if time.Since(sc.lastAdvance) > sc.syncTimeout {
-		return scFinishedEv{reason: "timeout, no advance"}, nil
-	}
-
-	nextHeight := sc.nextHeightToSchedule()
-	if nextHeight == -1 {
-		return noOp, nil
-	}
-
-	bestPeerID, err := sc.selectPeer(nextHeight)
-	if err != nil {
-		return scSchedulerFail{reason: err}, nil
-	}
-	if err := sc.markPending(bestPeerID, nextHeight, event.time); err != nil {
-		return scSchedulerFail{reason: err}, nil // XXX: peerError might be more appropriate
-	}
-	return scBlockRequest{peerID: bestPeerID, height: nextHeight}, nil
-
-}
-
-func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) {
-	err := sc.setPeerRange(event.peerID, event.base, event.height)
-	if err != nil {
-		return scPeerError{peerID: event.peerID, reason: err}, nil
-	}
-	return noOp, nil
-}
-
-func (sc *scheduler) handle(event Event) (Event, error) {
-	switch event := event.(type) {
-	case bcResetState:
-		nextEvent, err := sc.handleResetState(event)
-		return nextEvent, err
-	case bcStatusResponse:
-		nextEvent, err := sc.handleStatusResponse(event)
-		return nextEvent, err
-	case bcBlockResponse:
-		nextEvent, err := sc.handleBlockResponse(event)
-		return nextEvent, err
-	case bcNoBlockResponse:
-		nextEvent, err := sc.handleNoBlockResponse(event)
-		return nextEvent, err
-	case rTrySchedule:
-		nextEvent, err := sc.handleTrySchedule(event)
-		return nextEvent, err
-	case bcAddNewPeer:
-		nextEvent, err := sc.handleAddNewPeer(event)
-		return nextEvent, err
-	case bcRemovePeer:
-		nextEvent, err := sc.handleRemovePeer(event)
-		return nextEvent, err
-	case rTryPrunePeer:
-		nextEvent, err := sc.handleTryPrunePeer(event)
-		return nextEvent, err
-	case pcBlockProcessed:
-		nextEvent, err := sc.handleBlockProcessed(event)
-		return nextEvent, err
-	case pcBlockVerificationFailure:
-		nextEvent, err := sc.handleBlockProcessError(event)
-		return nextEvent, err
-	default:
-		return scSchedulerFail{reason: fmt.Errorf("unknown event %v", event)}, nil
-	}
-}
diff --git a/internal/blocksync/v2/scheduler_test.go b/internal/blocksync/v2/scheduler_test.go
deleted file mode 100644
index 91fac3637..000000000
--- a/internal/blocksync/v2/scheduler_test.go
+++ /dev/null
@@ -1,2253 +0,0 @@
-package v2
-
-import (
-	"fmt"
-	"math"
-	"sort"
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/types"
-)
-
-type scTestParams struct {
-	peers         map[string]*scPeer
-	initHeight    int64
-	height        int64
-	allB          []int64
-	pending       map[int64]types.NodeID
-	pendingTime   map[int64]time.Time
-	received      map[int64]types.NodeID
-	peerTimeout   time.Duration
-	minRecvRate   int64
-	targetPending int
-	startTime     time.Time
-	syncTimeout   time.Duration
-}
-
-func verifyScheduler(sc *scheduler) {
-	missing := 0
-	if sc.maxHeight() >= sc.height {
-		missing = int(math.Min(float64(sc.targetPending), float64(sc.maxHeight()-sc.height+1)))
-	}
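
The check below mirrors the addNewBlocks invariant in scheduler.go: the test
helper insists that blockStates holds exactly the schedulable window, that is
min(targetPending, maxHeight - height + 1) entries, and nothing beyond the max
Ready-peer height.
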
-	if len(sc.blockStates) != missing {
-		panic(fmt.Sprintf("scheduler block length %d different from target %d", len(sc.blockStates), missing))
-	}
-}
-
-func newTestScheduler(params scTestParams) *scheduler {
-	peers := make(map[types.NodeID]*scPeer)
-	var maxHeight int64
-
-	initHeight := params.initHeight
-	if initHeight == 0 {
-		initHeight = 1
-	}
-	sc := newScheduler(initHeight, params.startTime)
-	if params.height != 0 {
-		sc.height = params.height
-	}
-
-	for id, peer := range params.peers {
-		peer.peerID = types.NodeID(id)
-		peers[types.NodeID(id)] = peer
-		if maxHeight < peer.height {
-			maxHeight = peer.height
-		}
-	}
-	for _, h := range params.allB {
-		sc.blockStates[h] = blockStateNew
-	}
-	for h, pid := range params.pending {
-		sc.blockStates[h] = blockStatePending
-		sc.pendingBlocks[h] = pid
-	}
-	for h, tm := range params.pendingTime {
-		sc.pendingTime[h] = tm
-	}
-	for h, pid := range params.received {
-		sc.blockStates[h] = blockStateReceived
-		sc.receivedBlocks[h] = pid
-	}
-
-	sc.peers = peers
-	sc.peerTimeout = params.peerTimeout
-	if params.syncTimeout == 0 {
-		sc.syncTimeout = 10 * time.Second
-	} else {
-		sc.syncTimeout = params.syncTimeout
-	}
-
-	if params.targetPending == 0 {
-		sc.targetPending = 10
-	} else {
-		sc.targetPending = params.targetPending
-	}
-
-	sc.minRecvRate = params.minRecvRate
-
-	verifyScheduler(sc)
-
-	return sc
-}
-
-func TestScInit(t *testing.T) {
-	var (
-		initHeight int64 = 5
-		sc               = newScheduler(initHeight, time.Now())
-	)
-	assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(initHeight-1))
-	assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight))
-	assert.Equal(t, blockStateUnknown, sc.getStateAtHeight(initHeight+1))
-}
-
-func TestScMaxHeights(t *testing.T) {
-
-	tests := []struct {
-		name    string
-		sc      scheduler
-		wantMax int64
-	}{
-		{
-			name:    "no peers",
-			sc:      scheduler{height: 11},
-			wantMax: 10,
-		},
-		{
-			name: "one ready peer",
-			sc: scheduler{
-				height: 3,
-				peers:  map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
-			},
-			wantMax: 6,
-		},
-		{
-			name: "ready and removed peers",
-			sc: scheduler{
-				height: 1,
-				peers: map[types.NodeID]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 10, state: peerStateRemoved}},
-			},
-			wantMax: 4,
-		},
-		{
-			name: "removed peers",
-			sc: scheduler{
-				height: 1,
-				peers: map[types.NodeID]*scPeer{
-					"P1": {height: 4, state: peerStateRemoved},
-					"P2": {height: 10, state: peerStateRemoved}},
-			},
-			wantMax: 0,
-		},
-		{
-			name: "new peers",
-			sc: scheduler{
-				height: 1,
-				peers: map[types.NodeID]*scPeer{
-					"P1": {base: -1, height: -1, state: peerStateNew},
-					"P2": {base: -1, height: -1, state: peerStateNew}},
-			},
-			wantMax: 0,
-		},
-		{
-			name: "mixed peers",
-			sc: scheduler{
-				height: 1,
-				peers: map[types.NodeID]*scPeer{
-					"P1": {height: -1, state: peerStateNew},
-					"P2": {height: 10, state: peerStateReady},
-					"P3": {height: 20, state: peerStateRemoved},
-					"P4": {height: 22, state: peerStateReady},
-				},
-			},
-			wantMax: 22,
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			// maxHeight() should not mutate the scheduler
-			wantSc := tt.sc
-
-			resMax := tt.sc.maxHeight()
-			assert.Equal(t, tt.wantMax, resMax)
-			assert.Equal(t, wantSc, tt.sc)
-		})
-	}
-}
-
-func TestScEnsurePeer(t *testing.T) {
-
-	type args struct {
-		peerID types.NodeID
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantFields scTestParams
-	}{
-		{
-			name:   "add first peer",
-			fields: scTestParams{},
-			args:   args{peerID: "P1"},
map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add second peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, - args: args{peerID: "P2"}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}, - "P2": {base: -1, height: -1, state: peerStateNew}}}, - }, - { - name: "add duplicate peer is fine", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}}, - }, - { - name: "add duplicate peer with existing peer in Ready state is noop", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 3}}, - allB: []int64{1, 2, 3}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.ensurePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScTouchPeer(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - time time.Time - } - - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt to touch non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - args: args{peerID: "P2", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, height: 5}}, - allB: []int64{1, 2, 3, 4, 5}, - }, - wantErr: true, - }, - { - name: "attempt to touch peer in state New", - fields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {}}}, - wantErr: true, - }, - { - name: "attempt to touch peer in state Removed", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - args: args{peerID: "P1", time: now}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateRemoved}, "P2": {state: peerStateReady}}}, - wantErr: true, - }, - { - name: "touch peer in state Ready", - fields: scTestParams{peers: map[string]*scPeer{"P1": {state: peerStateReady, lastTouched: now}}}, - args: args{peerID: "P1", time: now.Add(3 * time.Second)}, - wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {state: peerStateReady, lastTouched: now.Add(3 * time.Second)}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - if err := sc.touchPeer(tt.args.peerID, tt.args.time); (err != nil) != tt.wantErr { - t.Errorf("touchPeer() wantErr %v, error = %v", tt.wantErr, err) - } - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScPrunablePeers(t *testing.T) { - now := time.Now() - - type args struct { - threshold time.Duration - time time.Time - minSpeed int64 - } - - tests := []struct { - name string - fields scTestParams - args args - wantResult []types.NodeID - }{ - { - name: "no peers", - fields: scTestParams{peers: 
-			fields:     scTestParams{peers: map[string]*scPeer{}},
-			args:       args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name: "mixed peers",
-			fields: scTestParams{peers: map[string]*scPeer{
-				// X - removed, active, fast
-				"P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101},
-				// X - ready, active, fast
-				"P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101},
-				// X - removed, active, equal
-				"P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100},
-				// V - ready, inactive, equal
-				"P4": {state: peerStateReady, lastTouched: now, lastRate: 100},
-				// V - ready, inactive, slow
-				"P5": {state: peerStateReady, lastTouched: now, lastRate: 99},
-				// V - ready, active, slow
-				"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
-			}},
-			args:       args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
-			wantResult: []types.NodeID{"P4", "P5", "P6"},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			// prunablePeers should not mutate the scheduler
-			wantSc := sc
-			res := sc.prunablePeers(tt.args.threshold, tt.args.minSpeed, tt.args.time)
-			assert.Equal(t, tt.wantResult, res)
-			assert.Equal(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScRemovePeer(t *testing.T) {
-
-	type args struct {
-		peerID types.NodeID
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantFields scTestParams
-		wantErr    bool
-	}{
-		{
-			name:       "remove non existing peer",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}},
-			args:       args{peerID: "P2"},
-			wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}},
-		},
-		{
-			name:       "remove single New peer",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: -1}}},
-			args:       args{peerID: "P1"},
-			wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}}},
-		},
-		{
-			name:       "remove one of two New peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: -1}, "P2": {height: -1}}},
-			args:       args{peerID: "P1"},
-			wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateRemoved}, "P2": {height: -1}}},
-		},
-		{
-			name: "remove one Ready peer, all peers removed",
-			fields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 10, state: peerStateRemoved},
-					"P2": {height: 5, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4, 5},
-			},
-			args: args{peerID: "P2"},
-			wantFields: scTestParams{peers: map[string]*scPeer{
-				"P1": {height: 10, state: peerStateRemoved},
-				"P2": {height: 5, state: peerStateRemoved}},
-			},
-		},
-		{
-			name: "attempt to remove already removed peer",
-			fields: scTestParams{
-				height: 8,
-				peers: map[string]*scPeer{
-					"P1": {height: 10, state: peerStateRemoved},
-					"P2": {height: 11, state: peerStateReady}},
-				allB: []int64{8, 9, 10, 11},
-			},
-			args: args{peerID: "P1"},
-			wantFields: scTestParams{
-				height: 8,
-				peers: map[string]*scPeer{
-					"P1": {height: 10, state: peerStateRemoved},
-					"P2": {height: 11, state: peerStateReady}},
-				allB: []int64{8, 9, 10, 11}},
-		},
-		{
-			name: "remove Ready peer with blocks requested",
-			fields: scTestParams{
-				peers:   map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
-				allB:    []int64{1, 2, 3},
-				pending: map[int64]types.NodeID{1: "P1"},
-			},
-			args: args{peerID: "P1"},
-			wantFields: scTestParams{
map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer with blocks received", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, - allB: []int64{}, - received: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 3: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P1"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}, - pending: map[int64]types.NodeID{}, - received: map[int64]types.NodeID{}, - }, - }, - { - name: "remove Ready peer from multiple peers set, with blocks received and requested (not yet received)", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateReady}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"}, - }, - args: args{peerID: "P1"}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 6, state: peerStateRemoved}, - "P2": {height: 6, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{3: "P2"}, - received: map[int64]types.NodeID{4: "P2", 5: "P2"}, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - sc.removePeer(tt.args.peerID) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers) - }) - } -} - -func TestScSetPeerRange(t *testing.T) { - - type args struct { - peerID types.NodeID - base int64 - height int64 - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 4}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 2, state: peerStateReady}, - "P2": {height: 4, state: peerStateReady}, - }, - allB: []int64{1, 2, 3, 4}}, - }, - { - name: "increase height of removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 4}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - }, - { - name: "decrease height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{peerID: "P1", height: 2}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, - allB: []int64{}}, - wantErr: true, - }, - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - 
-				allB:  []int64{1, 2}},
-			args: args{peerID: "P1", height: 4},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4}},
-		},
-		{
-			name: "noop height change of single peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4}},
-			args: args{peerID: "P1", height: 4},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4}},
-		},
-		{
-			name: "add peer with huge height 10**10",
-			fields: scTestParams{
-				peers:         map[string]*scPeer{"P2": {height: -1, state: peerStateNew}},
-				targetPending: 4,
-			},
-			args: args{peerID: "P2", height: 10000000000},
-			wantFields: scTestParams{
-				targetPending: 4,
-				peers:         map[string]*scPeer{"P2": {height: 10000000000, state: peerStateReady}},
-				allB:          []int64{1, 2, 3, 4}},
-		},
-		{
-			name: "add peer with base > height should error",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4}},
-			args: args{peerID: "P1", base: 6, height: 5},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
-			wantErr: true,
-		},
-		{
-			name: "add peer with base == height is fine",
-			fields: scTestParams{
-				peers:         map[string]*scPeer{"P1": {height: 4, state: peerStateNew}},
-				targetPending: 4,
-			},
-			args: args{peerID: "P1", base: 6, height: 6},
-			wantFields: scTestParams{
-				targetPending: 4,
-				peers:         map[string]*scPeer{"P1": {base: 6, height: 6, state: peerStateReady}},
-				allB:          []int64{1, 2, 3, 4}},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			err := sc.setPeerRange(tt.args.peerID, tt.args.base, tt.args.height)
-			if (err != nil) != tt.wantErr {
-				t.Errorf("setPeerRange() wantErr %v, error = %v", tt.wantErr, err)
-			}
-			wantSc := newTestScheduler(tt.wantFields)
-			assert.Equal(t, wantSc, sc, "wanted peers %v, got %v", wantSc.peers, sc.peers)
-		})
-	}
-}
-
-func TestScGetPeersWithHeight(t *testing.T) {
-
-	type args struct {
-		height int64
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantResult []types.NodeID
-	}{
-		{
-			name:       "no peers",
-			fields:     scTestParams{peers: map[string]*scPeer{}},
-			args:       args{height: 10},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name:       "only new peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
-			args:       args{height: 10},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name:       "only Removed peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
-			args:       args{height: 2},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name: "one Ready shorter peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 5},
-			wantResult: []types.NodeID{},
-		},
-		{
-			name: "one Ready equal peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 4},
-			wantResult: []types.NodeID{"P1"},
-		},
-		{
-			name: "one Ready higher peer",
-			fields: scTestParams{
-				targetPending: 4,
-				peers:         map[string]*scPeer{"P1": {height: 20, state: peerStateReady}},
-				allB:          []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 4},
-			wantResult: []types.NodeID{"P1"},
-		},
-		{
base", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 4, height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, - }, - { - name: "one Ready higher peer with higher base", - fields: scTestParams{ - targetPending: 4, - peers: map[string]*scPeer{"P1": {base: 10, height: 20, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}, - }, - args: args{height: 4}, - wantResult: []types.NodeID{}, - }, - { - name: "multiple mixed peers", - fields: scTestParams{ - height: 8, - peers: map[string]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: 10, state: peerStateReady}, - "P3": {height: 5, state: peerStateReady}, - "P4": {height: 20, state: peerStateRemoved}, - "P5": {height: 11, state: peerStateReady}}, - allB: []int64{8, 9, 10, 11}, - }, - args: args{height: 8}, - wantResult: []types.NodeID{"P2", "P5"}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - // getPeersWithHeight should not mutate the scheduler - wantSc := sc - res := sc.getPeersWithHeight(tt.args.height) - sort.Sort(PeerByID(res)) - assert.Equal(t, tt.wantResult, res) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScMarkPending(t *testing.T) { - now := time.Now() - - type args struct { - peerID types.NodeID - height int64 - tm time.Time - } - tests := []struct { - name string - fields scTestParams - args args - wantFields scTestParams - wantErr bool - }{ - { - name: "attempt mark pending an unknown block above height", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P1", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "attempt mark pending an unknown block below base", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - args: args{peerID: "P1", height: 3, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - wantErr: true, - }, - { - name: "attempt mark pending from non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{peerID: "P2", height: 1, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - wantErr: true, - }, - { - name: "mark pending from Removed peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{peerID: "P1", height: 1, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - wantErr: true, - }, - { - name: "mark pending from New peer", - fields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateNew}, - }, - allB: []int64{1, 2, 3, 4}, - }, - args: args{peerID: "P2", height: 2, tm: now}, - wantFields: scTestParams{ - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 4, state: peerStateNew}, - }, - allB: []int64{1, 2, 3, 4}, - }, - wantErr: true, - }, - { - name: "mark pending from short peer", - fields: 
-			fields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 2, state: peerStateReady},
-				},
-				allB: []int64{1, 2, 3, 4},
-			},
-			args: args{peerID: "P2", height: 3, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 2, state: peerStateReady},
-				},
-				allB: []int64{1, 2, 3, 4},
-			},
-			wantErr: true,
-		},
-		{
-			name: "mark pending all good",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1"},
-				pendingTime: map[int64]time.Time{1: now},
-			},
-			args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
-			wantFields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			if err := sc.markPending(tt.args.peerID, tt.args.height, tt.args.tm); (err != nil) != tt.wantErr {
-				t.Errorf("markPending() wantErr %v, error = %v", tt.wantErr, err)
-			}
-			wantSc := newTestScheduler(tt.wantFields)
-			assert.Equal(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScMarkReceived(t *testing.T) {
-	now := time.Now()
-
-	type args struct {
-		peerID types.NodeID
-		height int64
-		size   int64
-		tm     time.Time
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantFields scTestParams
-		wantErr    bool
-	}{
-		{
-			name: "received from non existing peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			args: args{peerID: "P2", height: 1, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:  []int64{1, 2}},
-			wantErr: true,
-		},
-		{
-			name: "received from removed peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
-			args: args{peerID: "P1", height: 1, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}},
-			wantErr: true,
-		},
-		{
-			name: "received from unsolicited peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 4, state: peerStateReady},
-				},
-				allB:    []int64{1, 2, 3, 4},
-				pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
-			},
-			args: args{peerID: "P1", height: 2, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady},
-					"P2": {height: 4, state: peerStateReady},
-				},
-				allB:    []int64{1, 2, 3, 4},
-				pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
-			},
-			wantErr: true,
-		},
-		{
-			name: "received but blockRequest not sent",
-			fields: scTestParams{
-				peers:   map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:    []int64{1, 2, 3, 4},
-				pending: map[int64]types.NodeID{},
-			},
-			args: args{peerID: "P1", height: 2, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers:   map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:    []int64{1, 2, 3, 4},
-				pending: map[int64]types.NodeID{},
-			},
-			wantErr: true,
-		},
-		{
-			name: "received with bad timestamp",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
-			},
-			args: args{peerID: "P1", height: 2, size: 1000, tm: now},
-			wantFields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
-			},
-			wantErr: true,
-		},
-		{
-			name: "received all good",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now},
-			},
-			args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
-			wantFields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{1: "P1"},
-				pendingTime: map[int64]time.Time{1: now},
-				received:    map[int64]types.NodeID{2: "P1"},
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			if err := sc.markReceived(
-				tt.args.peerID,
-				tt.args.height,
-				tt.args.size,
-				tt.args.tm); (err != nil) != tt.wantErr {
-				t.Errorf("markReceived() wantErr %v, error = %v", tt.wantErr, err)
-			}
-			wantSc := newTestScheduler(tt.wantFields)
-			assert.Equal(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScMarkProcessed(t *testing.T) {
-	now := time.Now()
-
-	type args struct {
-		height int64
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantFields scTestParams
-		wantErr    bool
-	}{
-		{
-			name: "processed an unreceived block",
-			fields: scTestParams{
-				height:        2,
-				peers:         map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:          []int64{2},
-				pending:       map[int64]types.NodeID{2: "P1"},
-				pendingTime:   map[int64]time.Time{2: now},
-				targetPending: 1,
-			},
-			args: args{height: 2},
-			wantFields: scTestParams{
-				height:        3,
-				peers:         map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:          []int64{3},
-				targetPending: 1,
-			},
-		},
-		{
-			name: "mark processed success",
-			fields: scTestParams{
-				height:      1,
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{1, 2},
-				pending:     map[int64]types.NodeID{2: "P1"},
-				pendingTime: map[int64]time.Time{2: now},
-				received:    map[int64]types.NodeID{1: "P1"}},
-			args: args{height: 1},
-			wantFields: scTestParams{
-				height:      2,
-				peers:       map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
-				allB:        []int64{2},
-				pending:     map[int64]types.NodeID{2: "P1"},
-				pendingTime: map[int64]time.Time{2: now}},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			oldBlockState := sc.getStateAtHeight(tt.args.height)
-			if err := sc.markProcessed(tt.args.height); (err != nil) != tt.wantErr {
-				t.Errorf("markProcessed() wantErr %v, error = %v", tt.wantErr, err)
-			}
-			if tt.wantErr {
-				assert.Equal(t, oldBlockState, sc.getStateAtHeight(tt.args.height))
-			} else {
-				assert.Equal(t, blockStateProcessed, sc.getStateAtHeight(tt.args.height))
-			}
-			wantSc := newTestScheduler(tt.wantFields)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScResetState(t *testing.T) {
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		state      state.State
-		wantFields scTestParams
-	}{
-		{
-			name: "updates height and initHeight",
-			fields: scTestParams{
-				height:     0,
-				initHeight: 0,
-			},
-			state: state.State{LastBlockHeight: 7},
-			wantFields: scTestParams{
-				height:     8,
-				initHeight: 8,
-			},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			e, err := sc.handleResetState(bcResetState{state: tt.state})
-			require.NoError(t, err)
-			assert.Equal(t, e, noOp)
-			wantSc := newTestScheduler(tt.wantFields)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScAllBlocksProcessed(t *testing.T) {
-	now := time.Now()
-
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		wantResult bool
-	}{
-		{
-			name:       "no blocks, no peers",
-			fields:     scTestParams{},
-			wantResult: false,
-		},
-		{
-			name: "only New blocks",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4},
-			},
-			wantResult: false,
-		},
-		{
-			name: "only Pending blocks",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
-			},
-			wantResult: false,
-		},
-		{
-			name: "only Received blocks",
-			fields: scTestParams{
-				peers:    map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:     []int64{1, 2, 3, 4},
-				received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
-			},
-			wantResult: false,
-		},
-		{
-			name: "only Processed blocks plus highest is received",
-			fields: scTestParams{
-				height: 4,
-				peers: map[string]*scPeer{
-					"P1": {height: 4, state: peerStateReady}},
-				allB:     []int64{4},
-				received: map[int64]types.NodeID{4: "P1"},
-			},
-			wantResult: true,
-		},
-		{
-			name: "mixed block states",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4},
-				pending:     map[int64]types.NodeID{2: "P1", 4: "P1"},
-				pendingTime: map[int64]time.Time{2: now, 4: now},
-			},
-			wantResult: false,
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			// allBlocksProcessed() should not mutate the scheduler
-			wantSc := sc
-			res := sc.allBlocksProcessed()
-			assert.Equal(t, tt.wantResult, res)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScNextHeightToSchedule(t *testing.T) {
-	now := time.Now()
-
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		wantHeight int64
-	}{
-		{
-			name:       "no blocks",
-			fields:     scTestParams{initHeight: 11, height: 11},
-			wantHeight: -1,
-		},
-		{
-			name: "only New blocks",
-			fields: scTestParams{
-				initHeight: 3,
-				peers:      map[string]*scPeer{"P1": {height: 6, state: peerStateReady}},
-				allB:       []int64{3, 4, 5, 6},
-			},
-			wantHeight: 3,
-		},
-		{
-			name: "only Pending blocks",
-			fields: scTestParams{
-				initHeight:  1,
-				peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4},
-				pending:     map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
-				pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
-			},
-			wantHeight: -1,
-		},
-		{
-			name: "only Received blocks",
-			fields: scTestParams{
-				initHeight: 1,
-				peers:      map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:       []int64{1, 2, 3, 4},
-				received:   map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
-			},
-			wantHeight: -1,
-		},
-		{
-			name: "only Processed blocks",
-			fields: scTestParams{
-				initHeight: 1,
-				peers:      map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:       []int64{1, 2, 3, 4},
-			},
-			wantHeight: 1,
-		},
-		{
-			name: "mixed block states",
-			fields: scTestParams{
-				initHeight:  1,
-				peers:       map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4},
-				pending:     map[int64]types.NodeID{2: "P1"},
-				pendingTime: map[int64]time.Time{2: now},
-			},
-			wantHeight: 1,
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			// nextHeightToSchedule() should not mutate the scheduler
-			wantSc := sc
-
-			resMin := sc.nextHeightToSchedule()
-			assert.Equal(t, tt.wantHeight, resMin)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-func TestScSelectPeer(t *testing.T) {
-
-	type args struct {
-		height int64
-	}
-	tests := []struct {
-		name       string
-		fields     scTestParams
-		args       args
-		wantResult types.NodeID
-		wantError  bool
-	}{
-		{
-			name:       "no peers",
-			fields:     scTestParams{peers: map[string]*scPeer{}},
-			args:       args{height: 10},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name:       "only new peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
-			args:       args{height: 10},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name:       "only Removed peers",
-			fields:     scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
-			args:       args{height: 2},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name: "one Ready shorter peer",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 5},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name: "one Ready equal peer",
-			fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4},
-			},
-			args:       args{height: 4},
-			wantResult: "P1",
-		},
-		{
-			name: "one Ready higher peer",
-			fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4, 5, 6},
-			},
-			args:       args{height: 4},
-			wantResult: "P1",
-		},
-		{
-			name: "one Ready higher peer with higher base",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4, 5, 6},
-			},
-			args:       args{height: 3},
-			wantResult: "",
-			wantError:  true,
-		},
-		{
-			name: "many Ready higher peers with different number of pending requests",
-			fields: scTestParams{
-				height: 4,
-				peers: map[string]*scPeer{
-					"P1": {height: 8, state: peerStateReady},
-					"P2": {height: 9, state: peerStateReady}},
-				allB: []int64{4, 5, 6, 7, 8, 9},
-				pending: map[int64]types.NodeID{
-					4: "P1", 6: "P1",
-					5: "P2",
-				},
-			},
-			args:       args{height: 4},
-			wantResult: "P2",
-		},
-		{
-			name: "many Ready higher peers with same number of pending requests",
-			fields: scTestParams{
-				peers: map[string]*scPeer{
-					"P2": {height: 20, state: peerStateReady},
-					"P1": {height: 15, state: peerStateReady},
-					"P3": {height: 15, state: peerStateReady}},
-				allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
-				pending: map[int64]types.NodeID{
-					1: "P1", 2: "P1",
-					3: "P3", 4: "P3",
-					5: "P2", 6: "P2",
-				},
-			},
-			args:       args{height: 7},
-			wantResult: "P1",
-		},
-	}
-
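
One detail these cases pin down: ties on the minimum pending-request count are
broken by sorting the candidates with PeerByID and taking the first, which is
why the "same number of pending requests" case above deterministically yields
"P1".
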
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			// selectPeer should not mutate the scheduler
-			wantSc := sc
-			res, err := sc.selectPeer(tt.args.height)
-			assert.Equal(t, tt.wantResult, res)
-			assert.Equal(t, tt.wantError, err != nil)
-			checkSameScheduler(t, wantSc, sc)
-		})
-	}
-}
-
-// makeScBlock makes an empty block.
-func makeScBlock(height int64) *types.Block {
-	return &types.Block{Header: types.Header{Height: height}}
-}
-
-// used in place of assert.Equal(t, want, actual) to avoid failures due to
-// scheduler.lastAdvance timestamp inequalities.
-func checkSameScheduler(t *testing.T, want *scheduler, actual *scheduler) {
-	assert.Equal(t, want.initHeight, actual.initHeight)
-	assert.Equal(t, want.height, actual.height)
-	assert.Equal(t, want.peers, actual.peers)
-	assert.Equal(t, want.blockStates, actual.blockStates)
-	assert.Equal(t, want.pendingBlocks, actual.pendingBlocks)
-	assert.Equal(t, want.pendingTime, actual.pendingTime)
-	assert.Equal(t, want.receivedBlocks, actual.receivedBlocks)
-}
-
-// checkScResults checks scheduler handler test results
-func checkScResults(t *testing.T, wantErr bool, err error, wantEvent Event, event Event) {
-	if (err != nil) != wantErr {
-		t.Errorf("error = %v, wantErr %v", err, wantErr)
-		return
-	}
-	if !assert.IsType(t, wantEvent, event) {
-		t.Logf("Wrong type received, got: %v", event)
-	}
-	switch wantEvent := wantEvent.(type) {
-	case scPeerError:
-		assert.Equal(t, wantEvent.peerID, event.(scPeerError).peerID)
-		assert.Equal(t, wantEvent.reason != nil, event.(scPeerError).reason != nil)
-	case scBlockReceived:
-		assert.Equal(t, wantEvent.peerID, event.(scBlockReceived).peerID)
-		assert.Equal(t, wantEvent.block, event.(scBlockReceived).block)
-	case scSchedulerFail:
-		assert.Equal(t, wantEvent.reason != nil, event.(scSchedulerFail).reason != nil)
-	}
-}
-
-func TestScHandleBlockResponse(t *testing.T) {
-	now := time.Now()
-	block6FromP1 := bcBlockResponse{
-		time:   now.Add(time.Millisecond),
-		peerID: types.NodeID("P1"),
-		size:   100,
-		block:  makeScBlock(6),
-	}
-
-	type args struct {
-		event bcBlockResponse
-	}
-
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name:      "empty scheduler",
-			fields:    scTestParams{},
-			args:      args{event: block6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name:      "block from removed peer",
-			fields:    scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}},
-			args:      args{event: block6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "block we haven't asked for",
-			fields: scTestParams{
-				peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:  []int64{1, 2, 3, 4, 5, 6, 7, 8}},
-			args:      args{event: block6FromP1},
-			wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
-		},
-		{
-			name: "block from wrong peer",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P2"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: block6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "block with bad timestamp",
-			fields: scTestParams{
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:        []int64{1, 2, 3, 4, 5, 6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
-			},
-			args:      args{event: block6FromP1},
-			wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
-		},
-		{
-			name: "good block, accept",
-			fields: scTestParams{
map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - args: args{event: block6FromP1}, - wantEvent: scBlockReceived{peerID: "P1", block: block6FromP1.block}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockResponse(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleNoBlockResponse(t *testing.T) { - now := time.Now() - noBlock6FromP1 := bcNoBlockResponse{ - time: now.Add(time.Millisecond), - peerID: types.NodeID("P1"), - height: 6, - } - - tests := []struct { - name string - fields scTestParams - wantEvent Event - wantFields scTestParams - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{}, - wantEvent: noOpEvent{}, - wantFields: scTestParams{}, - }, - { - name: "noBlock from removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - wantEvent: noOpEvent{}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - { - name: "for block we haven't asked for", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - { - name: "noBlock from peer we don't have", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - wantEvent: noOpEvent{}, - wantFields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, - pendingTime: map[int64]time.Time{6: now}, - }, - }, - { - name: "noBlock from existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, - pendingTime: map[int64]time.Time{6: now}, - }, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: 8, state: peerStateRemoved}}}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleNoBlockResponse(noBlock6FromP1) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - wantSc := newTestScheduler(tt.wantFields) - assert.Equal(t, wantSc, sc) - }) - } -} - -func TestScHandleBlockProcessed(t *testing.T) { - now := time.Now() - processed6FromP1 := pcBlockProcessed{ - peerID: types.NodeID("P1"), - height: 6, - } - - type args struct { - event pcBlockProcessed - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "empty scheduler", - fields: scTestParams{height: 6}, - args: args{event: processed6FromP1}, - wantEvent: noOpEvent{}, - }, - { - name: "processed block we don't have", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: processed6FromP1},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "processed block ok, we processed all blocks",
-			fields: scTestParams{
-				initHeight: 6,
-				peers:      map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
-				allB:       []int64{6, 7},
-				received:   map[int64]types.NodeID{6: "P1", 7: "P1"},
-			},
-			args:      args{event: processed6FromP1},
-			wantEvent: scFinishedEv{},
-		},
-		{
-			name: "processed block ok, we still have blocks to process",
-			fields: scTestParams{
-				initHeight: 6,
-				peers:      map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:       []int64{6, 7, 8},
-				pending:    map[int64]types.NodeID{7: "P1", 8: "P1"},
-				received:   map[int64]types.NodeID{6: "P1"},
-			},
-			args:      args{event: processed6FromP1},
-			wantEvent: noOpEvent{},
-		},
-	}
-
-	for _, tt := range tests {
-		tt := tt
-		t.Run(tt.name, func(t *testing.T) {
-			sc := newTestScheduler(tt.fields)
-			event, err := sc.handleBlockProcessed(tt.args.event)
-			checkScResults(t, tt.wantErr, err, tt.wantEvent, event)
-		})
-	}
-}
-
-func TestScHandleBlockVerificationFailure(t *testing.T) {
-	now := time.Now()
-
-	type args struct {
-		event pcBlockVerificationFailure
-	}
-
-	tests := []struct {
-		name      string
-		fields    scTestParams
-		args      args
-		wantEvent Event
-		wantErr   bool
-	}{
-		{
-			name:      "empty scheduler",
-			fields:    scTestParams{},
-			args:      args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "failed block we don't have, single peer is still removed",
-			fields: scTestParams{
-				initHeight:  6,
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
-				allB:        []int64{6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: scFinishedEv{},
-		},
-		{
-			name: "failed block we don't have, one of two peers are removed",
-			fields: scTestParams{
-				initHeight:  6,
-				peers:       map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
-				allB:        []int64{6, 7, 8},
-				pending:     map[int64]types.NodeID{6: "P1"},
-				pendingTime: map[int64]time.Time{6: now},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "failed block, all blocks are processed after removal",
-			fields: scTestParams{
-				initHeight: 6,
-				peers:      map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
-				allB:       []int64{6, 7},
-				received:   map[int64]types.NodeID{6: "P1", 7: "P1"},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: scFinishedEv{},
-		},
-		{
-			name: "failed block, we still have blocks to process",
-			fields: scTestParams{
-				initHeight: 5,
-				peers:      map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
-				allB:       []int64{5, 6, 7, 8},
-				pending:    map[int64]types.NodeID{7: "P1", 8: "P1"},
-				received:   map[int64]types.NodeID{5: "P1", 6: "P1"},
-			},
-			args:      args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
-			wantEvent: noOpEvent{},
-		},
-		{
-			name: "failed block, H+1 and H+2 delivered by different peers, we still have blocks to process",
-			fields: scTestParams{
-				initHeight: 5,
-				peers: map[string]*scPeer{
-					"P1": {height: 8, state: peerStateReady},
peerStateReady}, - "P2": {height: 8, state: peerStateReady}, - "P3": {height: 8, state: peerStateReady}, - }, - allB: []int64{5, 6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{5: "P1", 6: "P1"}, - }, - args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleBlockProcessError(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleAddNewPeer(t *testing.T) { - addP1 := bcAddNewPeer{ - peerID: types.NodeID("P1"), - } - type args struct { - event bcAddNewPeer - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "add P1 to empty scheduler", - fields: scTestParams{}, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - { - name: "add duplicate peer", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - }, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - { - name: "add P1 to non empty scheduler", - fields: scTestParams{ - initHeight: 6, - peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, - allB: []int64{6, 7, 8}, - }, - args: args{event: addP1}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleAddNewPeer(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleTryPrunePeer(t *testing.T) { - now := time.Now() - - pruneEv := rTryPrunePeer{ - time: now.Add(time.Second + time.Millisecond), - } - type args struct { - event rTryPrunePeer - } - - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "no peers", - fields: scTestParams{}, - args: args{event: pruneEv}, - wantEvent: noOpEvent{}, - }, - { - name: "no prunable peers", - fields: scTestParams{ - minRecvRate: 100, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100}}, - peerTimeout: time.Second, - }, - args: args{event: pruneEv}, - wantEvent: noOpEvent{}, - }, - { - name: "mixed peers", - fields: scTestParams{ - minRecvRate: 100, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7}, - }, - allB: []int64{1, 2, 3, 
4, 5, 6, 7}, - peerTimeout: time.Second}, - args: args{event: pruneEv}, - wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}}, - }, - { - name: "mixed peers, finish after pruning", - fields: scTestParams{ - minRecvRate: 100, - height: 6, - peers: map[string]*scPeer{ - // X - removed, active, fast - "P1": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - ready, active, fast - "P2": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 101, height: 5}, - // X - removed, active, equal - "P3": {state: peerStateRemoved, lastTouched: now.Add(time.Second), lastRate: 100, height: 5}, - // V - ready, inactive, equal - "P4": {state: peerStateReady, lastTouched: now, lastRate: 100, height: 7}, - // V - ready, inactive, slow - "P5": {state: peerStateReady, lastTouched: now, lastRate: 99, height: 7}, - // V - ready, active, slow - "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90, height: 7}, - }, - allB: []int64{6, 7}, - peerTimeout: time.Second}, - args: args{event: pruneEv}, - wantEvent: scFinishedEv{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleTryPrunePeer(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleTrySchedule(t *testing.T) { - now := time.Now() - tryEv := rTrySchedule{ - time: now.Add(time.Second + time.Millisecond), - } - - type args struct { - event rTrySchedule - } - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "no peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "only new peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "only Removed peers", - fields: scTestParams{startTime: now, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "one Ready shorter peer", - fields: scTestParams{ - startTime: now, - height: 6, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}}, - args: args{event: tryEv}, - wantEvent: noOpEvent{}, - }, - { - name: "one Ready equal peer", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4}}, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P1", height: 1}, - }, - { - name: "many Ready higher peers with different number of pending requests", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady}, - "P2": {height: 5, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5}, - pending: map[int64]types.NodeID{ - 1: "P1", 2: "P1", - 3: "P2", - }, - }, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P2", height: 4}, - }, - - { - name: "many Ready higher peers with same number of pending requests", - fields: scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P2": {height: 8, state: peerStateReady}, - "P1": {height: 8, state: peerStateReady}, - "P3": {height: 8, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{ - 1: "P1", 2: "P1", - 3: "P3", 4: "P3", - 5: "P2", 6: 
"P2", - }, - }, - args: args{event: tryEv}, - wantEvent: scBlockRequest{peerID: "P1", height: 7}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleTrySchedule(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandleStatusResponse(t *testing.T) { - now := time.Now() - statusRespP1Ev := bcStatusResponse{ - time: now.Add(time.Second + time.Millisecond), - peerID: "P1", - height: 6, - } - - type args struct { - event bcStatusResponse - } - tests := []struct { - name string - fields scTestParams - args args - wantEvent Event - wantErr bool - }{ - { - name: "change height of non existing peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P2": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}, - }, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - - { - name: "increase height of removed peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 2, state: peerStateRemoved}}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - - { - name: "decrease height of single peer", - fields: scTestParams{ - height: 5, - peers: map[string]*scPeer{"P1": {height: 10, state: peerStateReady}}, - allB: []int64{5, 6, 7, 8, 9, 10}, - }, - args: args{event: statusRespP1Ev}, - wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, - }, - - { - name: "increase height of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, - allB: []int64{1, 2}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - { - name: "noop height change of single peer", - fields: scTestParams{ - peers: map[string]*scPeer{"P1": {height: 6, state: peerStateReady}}, - allB: []int64{1, 2, 3, 4, 5, 6}}, - args: args{event: statusRespP1Ev}, - wantEvent: noOpEvent{}, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - sc := newTestScheduler(tt.fields) - event, err := sc.handleStatusResponse(tt.args.event) - checkScResults(t, tt.wantErr, err, tt.wantEvent, event) - }) - } -} - -func TestScHandle(t *testing.T) { - now := time.Now() - - type unknownEv struct { - priorityNormal - } - - block1, block2, block3 := makeScBlock(1), makeScBlock(2), makeScBlock(3) - - t0 := time.Now() - tick := make([]time.Time, 100) - for i := range tick { - tick[i] = t0.Add(time.Duration(i) * time.Millisecond) - } - - type args struct { - event Event - } - type scStep struct { - currentSc *scTestParams - args args - wantEvent Event - wantErr bool - wantSc *scTestParams - } - tests := []struct { - name string - steps []scStep - }{ - { - name: "unknown event", - steps: []scStep{ - { // add P1 - currentSc: &scTestParams{}, - args: args{event: unknownEv{}}, - wantEvent: scSchedulerFail{reason: fmt.Errorf("some error")}, - wantSc: &scTestParams{}, - }, - }, - }, - { - name: "single peer, sync 3 blocks", - steps: []scStep{ - { // add P1 - currentSc: &scTestParams{startTime: now, peers: map[string]*scPeer{}, height: 1}, - args: args{event: bcAddNewPeer{peerID: "P1"}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{startTime: now, peers: map[string]*scPeer{ - "P1": {base: -1, height: -1, state: peerStateNew}}, height: 1}, - }, - { // set height of P1 - args: args{event: bcStatusResponse{peerID: "P1", time: tick[0], height: 3}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, 
state: peerStateReady}}, - allB: []int64{1, 2, 3}, - height: 1, - }, - }, - { // schedule block 1 - args: args{event: rTrySchedule{time: tick[1]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 1}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1]}, - height: 1, - }, - }, - { // schedule block 2 - args: args{event: rTrySchedule{time: tick[2]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 2}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]}, - height: 1, - }, - }, - { // schedule block 3 - args: args{event: rTrySchedule{time: tick[3]}}, - wantEvent: scBlockRequest{peerID: "P1", height: 3}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]}, - height: 1, - }, - }, - { // block response 1 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[4], size: 100, block: block1}}, - wantEvent: scBlockReceived{peerID: "P1", block: block1}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{2: "P1", 3: "P1"}, - pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]}, - received: map[int64]types.NodeID{1: "P1"}, - height: 1, - }, - }, - { // block response 2 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[5], size: 100, block: block2}}, - wantEvent: scBlockReceived{peerID: "P1", block: block2}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}}, - allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{3: "P1"}, - pendingTime: map[int64]time.Time{3: tick[3]}, - received: map[int64]types.NodeID{1: "P1", 2: "P1"}, - height: 1, - }, - }, - { // block response 3 - args: args{event: bcBlockResponse{peerID: "P1", time: tick[6], size: 100, block: block3}}, - wantEvent: scBlockReceived{peerID: "P1", block: block3}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - height: 1, - }, - }, - { // processed block 1 - args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{2, 3}, - received: map[int64]types.NodeID{2: "P1", 3: "P1"}, - height: 2, - }, - }, - { // processed block 2 - args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}}, - wantEvent: scFinishedEv{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{3}, - received: map[int64]types.NodeID{3: "P1"}, - height: 3, - }, - }, - }, - }, - { - name: "block verification failure", - steps: []scStep{ - { // failure processing 
block 1 - currentSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateReady, lastTouched: tick[6]}, - "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, - height: 1, - }, - args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}}, - wantEvent: noOpEvent{}, - wantSc: &scTestParams{ - startTime: now, - peers: map[string]*scPeer{ - "P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]}, - "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, - allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{}, - height: 1, - }, - }, - }, - }, - } - - for _, tt := range tests { - tt := tt - t.Run(tt.name, func(t *testing.T) { - var sc *scheduler - for i, step := range tt.steps { - // First step must always initialize the currentState as state. - if step.currentSc != nil { - sc = newTestScheduler(*step.currentSc) - } - if sc == nil { - panic("Bad (initial?) step") - } - - nextEvent, err := sc.handle(step.args.event) - wantSc := newTestScheduler(*step.wantSc) - - t.Logf("step %d(%v): %s", i, step.args.event, sc) - checkSameScheduler(t, wantSc, sc) - - checkScResults(t, step.wantErr, err, step.wantEvent, nextEvent) - - // Next step may use the wantedState as their currentState. - sc = newTestScheduler(*step.wantSc) - } - }) - } -} diff --git a/internal/blocksync/v2/types.go b/internal/blocksync/v2/types.go deleted file mode 100644 index 7a73728e4..000000000 --- a/internal/blocksync/v2/types.go +++ /dev/null @@ -1,65 +0,0 @@ -package v2 - -import ( - "github.com/Workiva/go-datastructures/queue" -) - -// Event is the type that can be added to the priority queue. 
-type Event queue.Item
-
-type priority interface {
-	Compare(other queue.Item) int
-	Priority() int
-}
-
-type priorityLow struct{}
-type priorityNormal struct{}
-type priorityHigh struct{}
-
-func (p priorityLow) Priority() int {
-	return 1
-}
-
-func (p priorityNormal) Priority() int {
-	return 2
-}
-
-func (p priorityHigh) Priority() int {
-	return 3
-}
-
-func (p priorityLow) Compare(other queue.Item) int {
-	op := other.(priority)
-	if p.Priority() > op.Priority() {
-		return 1
-	} else if p.Priority() == op.Priority() {
-		return 0
-	}
-	return -1
-}
-
-func (p priorityNormal) Compare(other queue.Item) int {
-	op := other.(priority)
-	if p.Priority() > op.Priority() {
-		return 1
-	} else if p.Priority() == op.Priority() {
-		return 0
-	}
-	return -1
-}
-
-func (p priorityHigh) Compare(other queue.Item) int {
-	op := other.(priority)
-	if p.Priority() > op.Priority() {
-		return 1
-	} else if p.Priority() == op.Priority() {
-		return 0
-	}
-	return -1
-}
-
-type noOpEvent struct {
-	priorityLow
-}
-
-var noOp = noOpEvent{}
diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go
index 1c6ec858b..a826ef79b 100644
--- a/internal/consensus/byzantine_test.go
+++ b/internal/consensus/byzantine_test.go
@@ -10,20 +10,21 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
-	abcicli "github.com/tendermint/tendermint/abci/client"
+	dbm "github.com/tendermint/tm-db"
+
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/internal/evidence"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
 	"github.com/tendermint/tendermint/internal/p2p"
+	sm "github.com/tendermint/tendermint/internal/state"
+	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/internal/test/factory"
 	"github.com/tendermint/tendermint/libs/log"
 	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tm-db"
 )
 
 // Byzantine node sends two different prevotes (nil and blockID) to the same
@@ -61,9 +62,9 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
 		blockStore := store.NewBlockStore(blockDB)
 
 		// one for mempool, one for consensus
-		mtx := new(tmsync.RWMutex)
-		proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
-		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
+		mtx := new(tmsync.Mutex)
+		proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
+		proxyAppConnCon := abciclient.NewLocalClient(mtx, app)
 
 		// Make Mempool
 		mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go
index 17ba1ce2e..8b54f6026 100644
--- a/internal/consensus/common_test.go
+++ b/internal/consensus/common_test.go
@@ -7,24 +7,24 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"path"
 	"path/filepath"
 	"sync"
 	"testing"
 	"time"
 
 	"github.com/stretchr/testify/require"
-
-	"path"
-
 	dbm "github.com/tendermint/tm-db"
 
-	abcicli "github.com/tendermint/tendermint/abci/client"
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	cstypes "github.com/tendermint/tendermint/internal/consensus/types"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
+	sm "github.com/tendermint/tendermint/internal/state"
+	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/internal/test/factory"
 	tmbytes "github.com/tendermint/tendermint/libs/bytes"
 	"github.com/tendermint/tendermint/libs/log"
@@ -33,8 +33,6 @@ import (
 	tmtime "github.com/tendermint/tendermint/libs/time"
 	"github.com/tendermint/tendermint/privval"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 )
@@ -49,10 +47,10 @@ const (
 // test.
 type cleanupFunc func()
 
-func configSetup(t *testing.T) *cfg.Config {
+func configSetup(t *testing.T) *config.Config {
 	t.Helper()
 
-	config := ResetConfig("consensus_reactor_test")
+	cfg := ResetConfig("consensus_reactor_test")
 	consensusReplayConfig := ResetConfig("consensus_replay_test")
 	configStateTest := ResetConfig("consensus_state_test")
@@ -60,13 +58,13 @@ func configSetup(t *testing.T) *cfg.Config {
 	configByzantineTest := ResetConfig("consensus_byzantine_test")
 
 	t.Cleanup(func() {
-		os.RemoveAll(config.RootDir)
+		os.RemoveAll(cfg.RootDir)
 		os.RemoveAll(consensusReplayConfig.RootDir)
 		os.RemoveAll(configStateTest.RootDir)
 		os.RemoveAll(configMempoolTest.RootDir)
 		os.RemoveAll(configByzantineTest.RootDir)
 	})
 
-	return config
+	return cfg
 }
 
 func ensureDir(dir string, mode os.FileMode) {
@@ -75,8 +73,8 @@ func ensureDir(dir string, mode os.FileMode) {
 	}
 }
 
-func ResetConfig(name string) *cfg.Config {
-	return cfg.ResetTestRoot(name)
+func ResetConfig(name string) *config.Config {
+	return config.ResetTestRoot(name)
 }
 
 //-------------------------------------------------------------------------------
@@ -102,7 +100,7 @@ func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *valida
 }
 
 func (vs *validatorStub) signVote(
-	config *cfg.Config,
+	cfg *config.Config,
 	voteType tmproto.SignedMsgType,
 	hash []byte,
 	header types.PartSetHeader) (*types.Vote, error) {
@@ -122,7 +120,7 @@ func (vs *validatorStub) signVote(
 		BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
 	}
 	v := vote.ToProto()
-	if err := vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v); err != nil {
+	if err := vs.PrivValidator.SignVote(context.Background(), cfg.ChainID(), v); err != nil {
 		return nil, fmt.Errorf("sign vote failed: %w", err)
 	}
 
@@ -141,12 +139,12 @@ func (vs *validatorStub) signVote(
 // Sign vote for type/hash/header
 func signVote(
 	vs *validatorStub,
-	config *cfg.Config,
+	cfg *config.Config,
 	voteType tmproto.SignedMsgType,
 	hash []byte,
 	header types.PartSetHeader) *types.Vote {
 
-	v, err := vs.signVote(config, voteType, hash, header)
+	v, err := vs.signVote(cfg, voteType, hash, header)
 	if err != nil {
 		panic(fmt.Errorf("failed to sign vote: %v", err))
 	}
@@ -157,14 +155,14 @@ func signVote(
 }
 
 func signVotes(
-	config *cfg.Config,
+	cfg *config.Config,
 	voteType tmproto.SignedMsgType,
 	hash []byte,
 	header types.PartSetHeader,
 	vss ...*validatorStub) []*types.Vote {
 	votes := make([]*types.Vote, len(vss))
 	for i, vs := range vss {
-		votes[i] = signVote(vs, config, voteType, hash, header)
+		votes[i] = signVote(vs, cfg, voteType, hash, header)
 	}
 	return votes
 }
@@ -255,14 +253,14 @@ func addVotes(to *State, votes ...*types.Vote) {
 }
 
 func signAddVotes(
-	config *cfg.Config,
+	cfg *config.Config,
 	to *State,
 	voteType tmproto.SignedMsgType,
 	hash []byte,
 	header types.PartSetHeader,
 	vss ...*validatorStub,
 ) {
-	votes := signVotes(config, voteType, hash, header, vss...)
+	votes := signVotes(cfg, voteType, hash, header, vss...)
 	addVotes(to, votes...)
 }
@@ -387,12 +385,12 @@ func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message {
 // consensus states
 
 func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State {
-	config := cfg.ResetTestRoot("consensus_state_test")
-	return newStateWithConfig(config, state, pv, app)
+	cfg := config.ResetTestRoot("consensus_state_test")
+	return newStateWithConfig(cfg, state, pv, app)
 }
 
 func newStateWithConfig(
-	thisConfig *cfg.Config,
+	thisConfig *config.Config,
 	state sm.State,
 	pv types.PrivValidator,
 	app abci.Application,
@@ -402,16 +400,16 @@ func newStateWithConfig(
 }
 
 func newStateWithConfigAndBlockStore(
-	thisConfig *cfg.Config,
+	thisConfig *config.Config,
 	state sm.State,
 	pv types.PrivValidator,
 	app abci.Application,
 	blockStore *store.BlockStore,
 ) *State {
 	// one for mempool, one for consensus
-	mtx := new(tmsync.RWMutex)
-	proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
-	proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
+	mtx := new(tmsync.Mutex)
+	proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
+	proxyAppConnCon := abciclient.NewLocalClient(mtx, app)
 
 	// Make Mempool
 	mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
@@ -444,10 +442,10 @@ func newStateWithConfigAndBlockStore(
 	return cs
 }
 
-func loadPrivValidator(config *cfg.Config) *privval.FilePV {
-	privValidatorKeyFile := config.PrivValidator.KeyFile()
+func loadPrivValidator(cfg *config.Config) *privval.FilePV {
+	privValidatorKeyFile := cfg.PrivValidator.KeyFile()
 	ensureDir(filepath.Dir(privValidatorKeyFile), 0700)
-	privValidatorStateFile := config.PrivValidator.StateFile()
+	privValidatorStateFile := cfg.PrivValidator.StateFile()
 	privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile)
 	if err != nil {
 		panic(err)
@@ -456,9 +454,9 @@ func loadPrivValidator(config *cfg.Config) *privval.FilePV {
 	return privValidator
 }
 
-func randState(config *cfg.Config, nValidators int) (*State, []*validatorStub) {
+func randState(cfg *config.Config, nValidators int) (*State, []*validatorStub) {
 	// Get State
-	state, privVals := randGenesisState(config, nValidators, false, 10)
+	state, privVals := randGenesisState(cfg, nValidators, false, 10)
 
 	vss := make([]*validatorStub, nValidators)
@@ -704,15 +702,15 @@ func consensusLogger() log.Logger {
 
 func randConsensusState(
 	t *testing.T,
-	config *cfg.Config,
+	cfg *config.Config,
 	nValidators int,
 	testName string,
 	tickerFunc func() TimeoutTicker,
 	appFunc func() abci.Application,
-	configOpts ...func(*cfg.Config),
+	configOpts ...func(*config.Config),
 ) ([]*State, cleanupFunc) {
 
-	genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30)
+	genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, 30)
 	css := make([]*State, nValidators)
 	logger := consensusLogger()
@@ -759,18 +757,18 @@ func randConsensusState(
 
 // nPeers = nValidators + nNotValidator
 func randConsensusNetWithPeers(
-	config *cfg.Config,
+	cfg *config.Config,
 	nValidators,
 	nPeers int,
 	testName string,
 	tickerFunc func() TimeoutTicker,
 	appFunc func(string) abci.Application,
-) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
-	genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, testMinPower)
+) ([]*State, *types.GenesisDoc, *config.Config, cleanupFunc) {
+	genDoc, privVals := factory.RandGenesisDoc(cfg, nValidators, false, testMinPower)
 	css := make([]*State, nPeers)
 	logger := consensusLogger()
 
-	var peer0Config *cfg.Config
+	var peer0Config *config.Config
 	configRootDirs := make([]string, 0, nPeers)
 	for i := 0; i < nPeers; i++ {
 		state, _ := sm.MakeGenesisState(genDoc)
@@ -799,7 +797,7 @@ func randConsensusNetWithPeers(
 			}
 		}
 
-		app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
+		app := appFunc(path.Join(cfg.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
 		vals := types.TM2PB.ValidatorUpdates(state.Validators)
 		if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
 			// simulate handshake, receive app version. If don't do this, replay test will fail
@@ -820,12 +818,12 @@ func randConsensusNetWithPeers(
 }
 
 func randGenesisState(
-	config *cfg.Config,
+	cfg *config.Config,
 	numValidators int,
 	randPower bool,
 	minPower int64) (sm.State, []types.PrivValidator) {
 
-	genDoc, privValidators := factory.RandGenesisDoc(config, numValidators, randPower, minPower)
+	genDoc, privValidators := factory.RandGenesisDoc(cfg, numValidators, randPower, minPower)
 	s0, _ := sm.MakeGenesisState(genDoc)
 	return s0, privValidators
 }
diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go
index 5edec248a..4b2037469 100644
--- a/internal/consensus/mempool_test.go
+++ b/internal/consensus/mempool_test.go
@@ -10,20 +10,19 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-
 	dbm "github.com/tendermint/tm-db"
 
 	"github.com/tendermint/tendermint/abci/example/code"
 	abci "github.com/tendermint/tendermint/abci/types"
-	mempl "github.com/tendermint/tendermint/internal/mempool"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/store"
+	"github.com/tendermint/tendermint/internal/mempool"
+	sm "github.com/tendermint/tendermint/internal/state"
+	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/types"
 )
 
 // for testing
-func assertMempool(txn txNotifier) mempl.Mempool {
-	return txn.(mempl.Mempool)
+func assertMempool(txn txNotifier) mempool.Mempool {
+	return txn.(mempool.Mempool)
 }
 
 func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
@@ -113,7 +112,7 @@ func deliverTxsRange(cs *State, start, end int) {
 	for i := start; i < end; i++ {
 		txBytes := make([]byte, 8)
 		binary.BigEndian.PutUint64(txBytes, uint64(i))
-		err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempl.TxInfo{})
+		err := assertMempool(cs.txNotifier).CheckTx(context.Background(), txBytes, nil, mempool.TxInfo{})
 		if err != nil {
 			panic(fmt.Sprintf("Error after CheckTx: %v", err))
 		}
@@ -179,7 +178,7 @@ func TestMempoolRmBadTx(t *testing.T) {
 			return
 		}
 		checkTxRespCh <- struct{}{}
-	}, mempl.TxInfo{})
+	}, mempool.TxInfo{})
 	if err != nil {
 		t.Errorf("error after CheckTx: %v", err)
 		return
diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go
index bceac4942..a75f1505c 100644
--- a/internal/consensus/metrics.go
+++ b/internal/consensus/metrics.go
@@ -61,6 +61,9 @@ type Metrics struct {
 
 	// Number of blockparts transmitted by peer.
 	BlockParts metrics.Counter
+
+	// Histogram of time taken per step annotated with reason that the step proceeded.
+	StepTime metrics.Histogram
 }
 
 // PrometheusMetrics returns Metrics build using Prometheus client library.
@@ -187,6 +190,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
 			Name: "block_parts",
 			Help: "Number of blockparts transmitted by peer.",
 		}, append(labels, "peer_id")).With(labelsAndValues...),
+		StepTime: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
+			Namespace: namespace,
+			Subsystem: MetricsSubsystem,
+			Name: "step_time",
+			Help: "Time spent per step.",
+		}, append(labels, "step", "reason")).With(labelsAndValues...),
 	}
 }
 
diff --git a/internal/consensus/mocks/cons_sync_reactor.go b/internal/consensus/mocks/cons_sync_reactor.go
index 263969798..5ac592f0d 100644
--- a/internal/consensus/mocks/cons_sync_reactor.go
+++ b/internal/consensus/mocks/cons_sync_reactor.go
@@ -4,7 +4,7 @@ package mocks
 
 import (
 	mock "github.com/stretchr/testify/mock"
-	state "github.com/tendermint/tendermint/state"
+	state "github.com/tendermint/tendermint/internal/state"
 )
 
 // ConsSyncReactor is an autogenerated mock type for the ConsSyncReactor type
diff --git a/internal/consensus/mocks/fast_sync_reactor.go b/internal/consensus/mocks/fast_sync_reactor.go
index b7f521ff2..9da851065 100644
--- a/internal/consensus/mocks/fast_sync_reactor.go
+++ b/internal/consensus/mocks/fast_sync_reactor.go
@@ -4,7 +4,7 @@ package mocks
 
 import (
 	mock "github.com/stretchr/testify/mock"
-	state "github.com/tendermint/tendermint/state"
+	state "github.com/tendermint/tendermint/internal/state"
 
 	time "time"
 )
diff --git a/internal/consensus/msgs.go b/internal/consensus/msgs.go
index 17aef9aa2..052b8f556 100644
--- a/internal/consensus/msgs.go
+++ b/internal/consensus/msgs.go
@@ -77,7 +77,7 @@ func (m *NewRoundStepMessage) ValidateHeight(initialHeight int64) error {
 			m.LastCommitRound, initialHeight)
 	}
 	if m.Height > initialHeight && m.LastCommitRound < 0 {
-		return fmt.Errorf("LastCommitRound can only be negative for initial height %v", // nolint
+		return fmt.Errorf("LastCommitRound can only be negative for initial height %v",
 			initialHeight)
 	}
 	return nil
diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go
index 2b9fa7358..62517fd4f 100644
--- a/internal/consensus/reactor.go
+++ b/internal/consensus/reactor.go
@@ -8,77 +8,62 @@ import (
 	cstypes "github.com/tendermint/tendermint/internal/consensus/types"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/internal/p2p"
+	sm "github.com/tendermint/tendermint/internal/state"
 	"github.com/tendermint/tendermint/libs/bits"
 	tmevents "github.com/tendermint/tendermint/libs/events"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
-	sm "github.com/tendermint/tendermint/state"
 	"github.com/tendermint/tendermint/types"
 )
 
 var (
 	_ service.Service = (*Reactor)(nil)
 	_ p2p.Wrapper     = (*tmcons.Message)(nil)
+)
 
-	// ChannelShims contains a map of ChannelDescriptorShim objects, where each
-	// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding
-	// p2p proto.Message the new p2p Channel is responsible for handling.
-	//
-	//
-	// TODO: Remove once p2p refactor is complete.
-	// ref: https://github.com/tendermint/tendermint/issues/5670
-	ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
-		StateChannel: {
-			MsgType: new(tmcons.Message),
-			Descriptor: &p2p.ChannelDescriptor{
-				ID: byte(StateChannel),
-				Priority: 8,
-				SendQueueCapacity: 64,
-				RecvMessageCapacity: maxMsgSize,
-				RecvBufferCapacity: 128,
-				MaxSendBytes: 12000,
-			},
+// GetChannelDescriptors produces descriptors for each of this
+// package's required channels.
+func GetChannelDescriptors() []*p2p.ChannelDescriptor {
+	return []*p2p.ChannelDescriptor{
+		{
+			ID: StateChannel,
+			MessageType: new(tmcons.Message),
+			Priority: 8,
+			SendQueueCapacity: 64,
+			RecvMessageCapacity: maxMsgSize,
+			RecvBufferCapacity: 128,
 		},
-		DataChannel: {
-			MsgType: new(tmcons.Message),
-			Descriptor: &p2p.ChannelDescriptor{
-				// TODO: Consider a split between gossiping current block and catchup
-				// stuff. Once we gossip the whole block there is nothing left to send
-				// until next height or round.
-				ID: byte(DataChannel),
-				Priority: 12,
-				SendQueueCapacity: 64,
-				RecvBufferCapacity: 512,
-				RecvMessageCapacity: maxMsgSize,
-				MaxSendBytes: 40000,
-			},
+		{
+			// TODO: Consider a split between gossiping current block and catchup
+			// stuff. Once we gossip the whole block there is nothing left to send
+			// until next height or round.
+			ID: DataChannel,
+			MessageType: new(tmcons.Message),
+			Priority: 12,
+			SendQueueCapacity: 64,
+			RecvBufferCapacity: 512,
+			RecvMessageCapacity: maxMsgSize,
 		},
-		VoteChannel: {
-			MsgType: new(tmcons.Message),
-			Descriptor: &p2p.ChannelDescriptor{
-				ID: byte(VoteChannel),
-				Priority: 10,
-				SendQueueCapacity: 64,
-				RecvBufferCapacity: 128,
-				RecvMessageCapacity: maxMsgSize,
-				MaxSendBytes: 150,
-			},
+		{
+			ID: VoteChannel,
+			MessageType: new(tmcons.Message),
+			Priority: 10,
+			SendQueueCapacity: 64,
+			RecvBufferCapacity: 128,
+			RecvMessageCapacity: maxMsgSize,
 		},
-		VoteSetBitsChannel: {
-			MsgType: new(tmcons.Message),
-			Descriptor: &p2p.ChannelDescriptor{
-				ID: byte(VoteSetBitsChannel),
-				Priority: 5,
-				SendQueueCapacity: 8,
-				RecvBufferCapacity: 128,
-				RecvMessageCapacity: maxMsgSize,
-				MaxSendBytes: 50,
-			},
+		{
+			ID: VoteSetBitsChannel,
+			MessageType: new(tmcons.Message),
+			Priority: 5,
+			SendQueueCapacity: 8,
+			RecvBufferCapacity: 128,
+			RecvMessageCapacity: maxMsgSize,
 		},
 	}
-)
+}
 
 const (
 	StateChannel = p2p.ChannelID(0x20)
@@ -230,17 +215,15 @@ func (r *Reactor) OnStop() {
 	}
 
 	r.mtx.Lock()
-	peers := r.peers
+	// Close and wait for each of the peers to shut down.
+	// This is safe to perform with the lock since none of the peers require the
+	// lock to complete any of the methods that the waitgroup is waiting on.
+	for _, state := range r.peers {
+		state.closer.Close()
+		state.broadcastWG.Wait()
+	}
 	r.mtx.Unlock()
 
-	// wait for all spawned peer goroutines to gracefully exit
-	for _, ps := range peers {
-		ps.closer.Close()
-	}
-	for _, ps := range peers {
-		ps.broadcastWG.Wait()
-	}
-
 	// Close the StateChannel goroutine separately since it uses its own channel
 	// to signal closure.
 	close(r.stateCloseCh)
@@ -1096,7 +1079,7 @@ func (r *Reactor) handleDataMessage(envelope p2p.Envelope, msgI Message) error {
 	}
 
 	if r.WaitSync() {
-		logger.Info("ignoring message received during sync", "msg", msgI)
+		logger.Info("ignoring message received during sync", "msg", fmt.Sprintf("%T", msgI))
 		return nil
 	}
 
diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go
index 8c70ca1d5..16fa13969 100644
--- a/internal/consensus/reactor_test.go
+++ b/internal/consensus/reactor_test.go
@@ -12,26 +12,26 @@ import (
 	"github.com/fortytw2/leaktest"
 	"github.com/stretchr/testify/mock"
 	"github.com/stretchr/testify/require"
+	dbm "github.com/tendermint/tm-db"
 
-	abcicli "github.com/tendermint/tendermint/abci/client"
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
-	cfg "github.com/tendermint/tendermint/config"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/internal/mempool"
 	mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
 	"github.com/tendermint/tendermint/internal/p2p"
 	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
+	sm "github.com/tendermint/tendermint/internal/state"
+	statemocks "github.com/tendermint/tendermint/internal/state/mocks"
+	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/internal/test/factory"
 	"github.com/tendermint/tendermint/libs/log"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
 	tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
-	sm "github.com/tendermint/tendermint/state"
-	statemocks "github.com/tendermint/tendermint/state/mocks"
-	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tm-db"
 )
 
 var (
@@ -50,9 +50,11 @@ type reactorTestSuite struct {
 	voteSetBitsChannels map[types.NodeID]*p2p.Channel
 }
 
-func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor {
-	return p2p.ChannelDescriptor{
-		ID: byte(chID),
+func chDesc(chID p2p.ChannelID, size int) *p2p.ChannelDescriptor {
+	return &p2p.ChannelDescriptor{
+		ID: chID,
+		MessageType: new(tmcons.Message),
+		RecvBufferCapacity: size,
 	}
 }
 
@@ -67,10 +69,10 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
 		blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes),
 	}
 
-	rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)
-	rts.dataChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(DataChannel), new(tmcons.Message), size)
-	rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel), new(tmcons.Message), size)
-	rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel), new(tmcons.Message), size)
+	rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel, size))
+	rts.dataChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(DataChannel, size))
+	rts.voteChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteChannel, size))
+	rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(VoteSetBitsChannel, size))
 
 	_, cancel := context.WithCancel(context.Background())
@@ -273,11 +275,11 @@ func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, he
 }
 
 func TestReactorBasic(t *testing.T) {
-	config := configSetup(t)
+	cfg := configSetup(t)
 
 	n := 4
 	states, cleanup := randConsensusState(t,
-		config, n, "consensus_reactor_test",
+		cfg, n, "consensus_reactor_test",
 		newMockTickerFunc(true), newKVStore)
 	t.Cleanup(cleanup)
@@ -316,14 +318,14 @@ func TestReactorBasic(t *testing.T) {
 }
 
 func TestReactorWithEvidence(t *testing.T) {
-	config := configSetup(t)
+	cfg := configSetup(t)
 
 	n := 4
 	testName := "consensus_reactor_test"
 	tickerFunc := newMockTickerFunc(true)
 	appFunc := newKVStore
 
-	genDoc, privVals := factory.RandGenesisDoc(config, n, false, 30)
+	genDoc, privVals := factory.RandGenesisDoc(cfg, n, false, 30)
 	states := make([]*State, n)
 	logger := consensusLogger()
@@ -346,9 +348,9 @@ func TestReactorWithEvidence(t *testing.T) {
 		blockStore := store.NewBlockStore(blockDB)
 
 		// one for mempool, one for consensus
-		mtx := new(tmsync.RWMutex)
-		proxyAppConnMem := abcicli.NewLocalClient(mtx, app)
-		proxyAppConnCon := abcicli.NewLocalClient(mtx, app)
+		mtx := new(tmsync.Mutex)
+		proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
+		proxyAppConnCon := abciclient.NewLocalClient(mtx, app)
 
 		mempool := mempoolv0.NewCListMempool(thisConfig.Mempool, proxyAppConnMem, 0)
 		mempool.SetLogger(log.TestingLogger().With("module", "mempool"))
@@ -360,7 +362,7 @@ func TestReactorWithEvidence(t *testing.T) {
 		// everyone includes evidence of another double signing
 		vIdx := (i + 1) % n
 
-		ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
+		ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], cfg.ChainID())
 		evpool := &statemocks.EvidencePool{}
 		evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
 		evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{
@@ -412,17 +414,17 @@ func TestReactorWithEvidence(t *testing.T) {
 }
 
 func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
-	config := configSetup(t)
+	cfg := configSetup(t)
 
 	n := 4
 	states, cleanup := randConsensusState(
 		t,
-		config,
+		cfg,
 		n,
 		"consensus_reactor_test",
 		newMockTickerFunc(true),
 		newKVStore,
-		func(c *cfg.Config) {
+		func(c *config.Config) {
 			c.Consensus.CreateEmptyBlocks = false
 		},
 	)
@@ -462,11 +464,11 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
 }
 
 func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
-	config := configSetup(t)
+	cfg := configSetup(t)
 
 	n := 4
 	states, cleanup := randConsensusState(t,
-		config, n, "consensus_reactor_test",
+		cfg, n, "consensus_reactor_test",
 		newMockTickerFunc(true), newKVStore)
 	t.Cleanup(cleanup)
@@ -521,12 +523,12 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
 }
 
 func TestReactorVotingPowerChange(t *testing.T) {
-	config := configSetup(t)
+	cfg := configSetup(t)
 
 	n := 4
 	states, cleanup := randConsensusState(
 		t,
-		config,
+		cfg,
 		n,
 		"consensus_voting_power_changes_test",
 		newMockTickerFunc(true),
@@ -573,7 +575,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
 	val1PubKey, err := states[0].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
 
-	val1PubKeyABCI, err := cryptoenc.PubKeyToProto(val1PubKey)
+	val1PubKeyABCI, err := encoding.PubKeyToProto(val1PubKey)
 	require.NoError(t, err)
 
 	updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25)
@@ -622,12 +624,12 @@ func TestReactorVotingPowerChange(t *testing.T) {
 }
 
 func TestReactorValidatorSetChanges(t *testing.T) {
-	config := configSetup(t)
+	cfg := configSetup(t)
 
 	nPeers := 7
 	nVals := 4
 	states, _, _, cleanup := randConsensusNetWithPeers(
-		config,
+		cfg,
 		nVals,
 		nPeers,
 		"consensus_val_set_changes_test",
@@ -668,7 +670,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	newValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
 
-	valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
+	valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1)
 	require.NoError(t, err)
 
 	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
@@ -701,7 +703,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	updateValidatorPubKey1, err := states[nVals].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
 
-	updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
+	updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1)
 	require.NoError(t, err)
 
 	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
@@ -721,7 +723,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	newValidatorPubKey2, err := states[nVals+1].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
 
-	newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
+	newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2)
 	require.NoError(t, err)
 
 	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
@@ -729,7 +731,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
 	newValidatorPubKey3, err := states[nVals+2].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
 
-	newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
+	newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3)
 	require.NoError(t, err)
 
 	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go
index 5d15c9a62..f75956ec1 100644
--- a/internal/consensus/replay_file.go
+++ b/internal/consensus/replay_file.go
@@ -12,13 +12,13 @@ import (
 
 	dbm "github.com/tendermint/tm-db"
 
-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/internal/proxy"
+	sm "github.com/tendermint/tendermint/internal/state"
+	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/libs/log"
 	tmos "github.com/tendermint/tendermint/libs/os"
 	tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
-	"github.com/tendermint/tendermint/proxy"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 )
@@ -31,8 +31,8 @@ const (
 
 // replay messages interactively or all at once
 // replay the wal file
-func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) {
-	consensusState := newConsensusStateForReplay(config, csConfig)
+func RunReplayFile(cfg config.BaseConfig, csConfig *config.ConsensusConfig, console bool) {
+	consensusState := newConsensusStateForReplay(cfg, csConfig)
 
 	if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil {
 		tmos.Exit(fmt.Sprintf("Error during consensus replay: %v", err))
@@ -286,22 +286,22 @@ func (pb *playback) replayConsoleLoop() int {
 //--------------------------------------------------------------------------------
 
 // convenience for replay mode
-func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *State {
-	dbType := dbm.BackendType(config.DBBackend)
+func newConsensusStateForReplay(cfg config.BaseConfig, csConfig *config.ConsensusConfig) *State {
+	dbType := dbm.BackendType(cfg.DBBackend)
 	// Get BlockStore
-	blockStoreDB, err := dbm.NewDB("blockstore", dbType, config.DBDir())
+	blockStoreDB, err := dbm.NewDB("blockstore", dbType, cfg.DBDir())
 	if err != nil {
 		tmos.Exit(err.Error())
 	}
 	blockStore := store.NewBlockStore(blockStoreDB)
 
 	// Get State
-	stateDB, err := dbm.NewDB("state", dbType, config.DBDir())
+	stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir())
 	if err != nil {
 		tmos.Exit(err.Error())
 	}
 	stateStore := sm.NewStore(stateDB)
 
-	gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile())
+	gdoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
 	if err != nil {
 		tmos.Exit(err.Error())
 	}
@@ -311,8 +311,8 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
 	}
 
 	// Create proxyAppConn connection (consensus, mempool, query)
-	clientCreator, _ := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir())
-	proxyApp := proxy.NewAppConns(clientCreator)
+	clientCreator, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir())
+	proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics())
 	err = proxyApp.Start()
 	if err != nil {
 		tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go
index 21ac565d8..6c206db03 100644
--- a/internal/consensus/replay_stubs.go
+++ b/internal/consensus/replay_stubs.go
@@ -5,7 +5,7 @@ import (
 
 	abci "github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/internal/libs/clist"
-	mempl "github.com/tendermint/tendermint/internal/mempool"
+	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/types"
 )
@@ -13,22 +13,23 @@ import (
 
 type emptyMempool struct{}
 
-var _ mempl.Mempool = emptyMempool{}
+var _ mempool.Mempool = emptyMempool{}
 
 func (emptyMempool) Lock()     {}
 func (emptyMempool) Unlock()   {}
 func (emptyMempool) Size() int { return 0 }
-func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
+func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error {
 	return nil
 }
+func (emptyMempool) RemoveTxByKey(txKey types.TxKey) error { return nil }
 func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
 func (emptyMempool) ReapMaxTxs(n int) types.Txs              { return types.Txs{} }
 func (emptyMempool) Update(
 	_ int64,
 	_ types.Txs,
 	_ []*abci.ResponseDeliverTx,
-	_ mempl.PreCheckFunc,
-	_ mempl.PostCheckFunc,
+	_ mempool.PreCheckFunc,
+	_ mempool.PostCheckFunc,
 ) error {
 	return nil
 }
diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go
index e6824e3c9..8db6381ce 100644
--- a/internal/consensus/replay_test.go
+++ b/internal/consensus/replay_test.go
@@ -17,12 +17,13 @@ import (
 	"github.com/stretchr/testify/require"
 	dbm "github.com/tendermint/tm-db"
 
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abci "github.com/tendermint/tendermint/abci/types"
-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
-	mempl "github.com/tendermint/tendermint/internal/mempool"
+	"github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/internal/mempool"
 	"github.com/tendermint/tendermint/libs/log"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -47,7 +48,7 @@ import (
 // and which ones we need the wal for - then we'd also be able to only flush the
 // wal writer when we need to, instead of with every message.
 
-func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Config,
+func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *config.Config,
 	lastBlockHeight int64, blockDB dbm.DB, stateStore sm.Store) {
 	logger := log.TestingLogger()
 	state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
@@ -96,7 +97,7 @@ func sendTxs(ctx context.Context, cs *State) {
 			return
 		default:
 			tx := []byte{byte(i)}
-			if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempl.TxInfo{}); err != nil {
+			if err := assertMempool(cs.txNotifier).CheckTx(context.Background(), tx, nil, mempool.TxInfo{}); err != nil {
 				panic(err)
 			}
 			i++
@@ -130,7 +131,7 @@ func TestWALCrash(t *testing.T) {
 	}
 }
 
-func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *cfg.Config,
+func crashWALandCheckLiveness(t *testing.T, consensusReplayConfig *config.Config,
 	initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
 	walPanicked := make(chan error)
 	crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
@@ -279,12 +280,12 @@ func (w *crashingWAL) Wait() { w.next.Wait() }
 //------------------------------------------------------------------------------------------
 type simulatorTestSuite struct {
 	GenesisState sm.State
-	Config       *cfg.Config
+	Config       *config.Config
 	Chain        []*types.Block
 	Commits      []*types.Commit
 	CleanupFunc  cleanupFunc
 
-	Mempool mempl.Mempool
+	Mempool mempool.Mempool
 	Evpool  sm.EvidencePool
 }
@@ -304,7 +305,7 @@ var modes = []uint{0, 1, 2, 3}
 // This is actually not a test, it's for storing validator change tx data for testHandshakeReplay
 func setupSimulator(t *testing.T) *simulatorTestSuite {
 	t.Helper()
-	config := configSetup(t)
+	cfg := configSetup(t)
 
 	sim := &simulatorTestSuite{
 		Mempool: emptyMempool{},
@@ -314,14 +315,14 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 	nPeers := 7
 	nVals := 4
 
-	css, genDoc, config, cleanup := randConsensusNetWithPeers(
-		config,
+	css, genDoc, cfg, cleanup := randConsensusNetWithPeers(
+		cfg,
 		nVals,
 		nPeers,
 		"replay_test",
 		newMockTickerFunc(true),
 		newPersistentKVStoreWithPath)
-	sim.Config = config
+	sim.Config = cfg
 	sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
 	sim.CleanupFunc = cleanup
 
@@ -354,10 +355,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 	incrementHeight(vss...)
 	newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
-	valPubKey1ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey1)
+	valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1)
 	require.NoError(t, err)
 	newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
-	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempl.TxInfo{})
+	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx1, nil, mempool.TxInfo{})
 	assert.Nil(t, err)
 	propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
 	propBlockParts := propBlock.MakePartSet(partSize)
@@ -365,7 +366,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 
 	proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
 	p := proposal.ToProto()
-	if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil {
+	if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
 		t.Fatal("failed to sign bad proposal", err)
 	}
 	proposal.Signature = p.Signature
@@ -386,10 +387,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 	incrementHeight(vss...)
 	updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
-	updatePubKey1ABCI, err := cryptoenc.PubKeyToProto(updateValidatorPubKey1)
+	updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1)
 	require.NoError(t, err)
 	updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
-	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempl.TxInfo{})
+	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), updateValidatorTx1, nil, mempool.TxInfo{})
 	assert.Nil(t, err)
 	propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
 	propBlockParts = propBlock.MakePartSet(partSize)
@@ -397,7 +398,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 
 	proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
 	p = proposal.ToProto()
-	if err := vss[2].SignProposal(context.Background(), config.ChainID(), p); err != nil {
+	if err := vss[2].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
 		t.Fatal("failed to sign bad proposal", err)
 	}
 	proposal.Signature = p.Signature
@@ -418,17 +419,17 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 	incrementHeight(vss...)
 	newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
-	newVal2ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey2)
+	newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2)
 	require.NoError(t, err)
 	newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
-	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempl.TxInfo{})
+	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx2, nil, mempool.TxInfo{})
 	assert.Nil(t, err)
 	newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(context.Background())
 	require.NoError(t, err)
-	newVal3ABCI, err := cryptoenc.PubKeyToProto(newValidatorPubKey3)
+	newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3)
 	require.NoError(t, err)
 	newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
-	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempl.TxInfo{})
+	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), newValidatorTx3, nil, mempool.TxInfo{})
 	assert.Nil(t, err)
 	propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
 	propBlockParts = propBlock.MakePartSet(partSize)
@@ -456,7 +457,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 
 	proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
 	p = proposal.ToProto()
-	if err := vss[3].SignProposal(context.Background(), config.ChainID(), p); err != nil {
+	if err := vss[3].SignProposal(context.Background(), cfg.ChainID(), p); err != nil {
 		t.Fatal("failed to sign bad proposal", err)
 	}
 	proposal.Signature = p.Signature
@@ -468,7 +469,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 	ensureNewProposal(proposalCh, height, round)
 
 	removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
-	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempl.TxInfo{})
+	err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx2, nil, mempool.TxInfo{})
 	assert.Nil(t, err)
 
 	rs = css[0].GetRoundState()
@@ -507,7 +508,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
 
 	height++
 	incrementHeight(vss...)
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(context.Background(), removeValidatorTx3, nil, mempool.TxInfo{}) assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) @@ -519,7 +520,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { selfIndex = valIndexFn(0) proposal = types.NewProposal(vss[1].Height, round, -1, blockID) p = proposal.ToProto() - if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { + if err := vss[1].SignProposal(context.Background(), cfg.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) } proposal.Signature = p.Signature @@ -569,7 +570,7 @@ func tempWALWithData(data []byte) string { } func applyBlock(stateStore sm.Store, - mempool mempl.Mempool, + mempool mempool.Mempool, evpool sm.EvidencePool, st sm.State, blk *types.Block, @@ -589,7 +590,7 @@ func applyBlock(stateStore sm.Store, func buildAppStateFromChain( proxyApp proxy.AppConns, stateStore sm.Store, - mempool mempl.Mempool, + mempool mempool.Mempool, evpool sm.EvidencePool, state sm.State, chain []*types.Block, @@ -636,8 +637,8 @@ func buildAppStateFromChain( } func buildTMStateFromChain( - config *cfg.Config, - mempool mempl.Mempool, + cfg *config.Config, + mempool mempool.Mempool, evpool sm.EvidencePool, stateStore sm.Store, state sm.State, @@ -647,11 +648,11 @@ func buildTMStateFromChain( blockStore *mockBlockStore) sm.State { // run the whole chain against this client to build up the tendermint state kvstoreApp := kvstore.NewPersistentKVStoreApplication( - filepath.Join(config.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) + filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode))) defer kvstoreApp.Close() - clientCreator := proxy.NewLocalClientCreator(kvstoreApp) + clientCreator := abciclient.NewLocalCreator(kvstoreApp) - proxyApp := proxy.NewAppConns(clientCreator) + proxyApp := proxy.NewAppConns(clientCreator, proxy.NopMetrics()) if err := proxyApp.Start(); err != nil { panic(err) } @@ -842,14 +843,14 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { // fresh state and mock store func stateAndStore( - config *cfg.Config, + cfg *config.Config, pubKey crypto.PubKey, appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, _ := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) state.Version.Consensus.App = appVersion - store := newMockBlockStore(config, state.ConsensusParams) + store := newMockBlockStore(cfg, state.ConsensusParams) if err := stateStore.Save(state); err != nil { panic(err) } @@ -860,7 +861,7 @@ func stateAndStore( // mock block store type mockBlockStore struct { - config *cfg.Config + cfg *config.Config params types.ConsensusParams chain []*types.Block commits []*types.Commit @@ -868,8 +869,8 @@ type mockBlockStore struct { } // TODO: NewBlockStore(db.NewMemDB) ... 
-func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { - return &mockBlockStore{config, params, nil, nil, 0} +func newMockBlockStore(cfg *config.Config, params types.ConsensusParams) *mockBlockStore { + return &mockBlockStore{cfg, params, nil, nil, 0} } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 4da989b40..42900a7d4 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -12,11 +12,12 @@ import ( "github.com/gogo/protobuf/proto" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cstypes "github.com/tendermint/tendermint/internal/consensus/types" "github.com/tendermint/tendermint/internal/libs/fail" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + sm "github.com/tendermint/tendermint/internal/state" tmevents "github.com/tendermint/tendermint/libs/events" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" @@ -27,7 +28,6 @@ import ( "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -80,7 +80,7 @@ type State struct { service.BaseService // config details - config *cfg.ConsensusConfig + config *config.ConsensusConfig privValidator types.PrivValidator // for signing votes privValidatorType types.PrivValidatorType @@ -137,7 +137,7 @@ type State struct { done chan struct{} // synchronous pubsub between consensus state and reactor. - // state only emits EventNewRoundStep and EventVote + // state only emits EventNewRoundStep, EventValidBlock, and EventVote evsw tmevents.EventSwitch // for reporting metrics @@ -152,7 +152,7 @@ type StateOption func(*State) // NewState returns a new State. func NewState( - config *cfg.ConsensusConfig, + cfg *config.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore sm.BlockStore, @@ -161,7 +161,7 @@ func NewState( options ...StateOption, ) *State { cs := &State{ - config: config, + config: cfg, blockExec: blockExec, blockStore: blockStore, txNotifier: txNotifier, @@ -241,8 +241,12 @@ func (cs *State) GetLastHeight() int64 { // GetRoundState returns a shallow copy of the internal consensus state. func (cs *State) GetRoundState() *cstypes.RoundState { cs.mtx.RLock() + defer cs.mtx.RUnlock() + + // NOTE: this might be dodgy: RoundState is explicitly not thread safe, + // and the shallow copy returned below still shares whatever data its + // pointer fields reference.
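+ // + // Deferring the unlock (rather than unlocking before the return, as the + // previous code did) also guarantees the read lock is released even if + // taking the copy panics.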
rs := cs.RoundState // copy - cs.mtx.RUnlock() return &rs } @@ -916,8 +920,8 @@ func (cs *State) handleMsg(mi msgInfo) { "height", cs.Height, "round", cs.Round, "peer", peerID, + "msg_type", fmt.Sprintf("%T", msg), "err", err, - "msg", msg, ) } } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index b3b7c81a3..a1db8276d 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -13,7 +13,6 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/crypto/tmhash" cstypes "github.com/tendermint/tendermint/internal/consensus/types" - p2pmock "github.com/tendermint/tendermint/internal/p2p/mock" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -331,7 +330,7 @@ func TestStateFullRound1(t *testing.T) { t.Error(err) } - voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) + voteCh := subscribe(cs.eventBus, types.EventQueryVote) propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) @@ -361,7 +360,7 @@ func TestStateFullRoundNil(t *testing.T) { cs, vss := randState(config, 1) height, round := cs.Height, cs.Round - voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) + voteCh := subscribe(cs.eventBus, types.EventQueryVote) cs.enterPrevote(height, round) cs.startRoutines(4) @@ -382,7 +381,7 @@ func TestStateFullRound2(t *testing.T) { vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) // start round and wait for propose and prevote @@ -428,7 +427,7 @@ func TestStateLockNoPOL(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) + voteCh := subscribe(cs1.eventBus, types.EventQueryVote) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) @@ -1864,7 +1863,8 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { // create dummy peer cs, _ := randState(config, 1) - peer := p2pmock.NewPeer(nil) + peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + require.NoError(t, err) // 1) new block part parts := types.NewPartSetFromData(tmrand.Bytes(100), 10) @@ -1875,26 +1875,26 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { } cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) statsMessage := <-cs.statsMsgQueue require.Equal(t, msg, statsMessage.Msg, "") - require.Equal(t, peer.ID(), statsMessage.PeerID, "") + require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer cs.handleMsg(msgInfo{msg, "peer2"}) // sending the part with the same height, but different round msg.Round = 1 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) // sending the part from the smaller height msg.Height = 0 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, peerID}) // sending the part from the bigger height msg.Height = 3 - cs.handleMsg(msgInfo{msg, peer.ID()}) + cs.handleMsg(msgInfo{msg, 
peerID}) select { case <-cs.statsMsgQueue: @@ -1909,18 +1909,19 @@ func TestStateOutputVoteStats(t *testing.T) { cs, vss := randState(config, 2) // create dummy peer - peer := p2pmock.NewPeer(nil) + peerID, err := types.NewNodeID("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA") + require.NoError(t, err) randBytes := tmrand.Bytes(tmhash.Size) vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{}) voteMessage := &VoteMessage{vote} - cs.handleMsg(msgInfo{voteMessage, peer.ID()}) + cs.handleMsg(msgInfo{voteMessage, peerID}) statsMessage := <-cs.statsMsgQueue require.Equal(t, voteMessage, statsMessage.Msg, "") - require.Equal(t, peer.ID(), statsMessage.PeerID, "") + require.Equal(t, peerID, statsMessage.PeerID, "") // sending the same part from different peer cs.handleMsg(msgInfo{&VoteMessage{vote}, "peer2"}) @@ -1929,7 +1930,7 @@ func TestStateOutputVoteStats(t *testing.T) { incrementHeight(vss[1]) vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{}) - cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}) + cs.handleMsg(msgInfo{&VoteMessage{vote}, peerID}) select { case <-cs.statsMsgQueue: @@ -1971,12 +1972,3 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Messa } return sub.Out() } - -// subscribe subscribes test client to the given query and returns a channel with cap = 0. -func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { - sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) - } - return sub.Out() -} diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index a9e309b4f..b3830a3f6 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -6,7 +6,7 @@ import ( "os" "testing" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -15,19 +15,19 @@ import ( "github.com/tendermint/tendermint/types" ) -var config *cfg.Config // NOTE: must be reset for each _test.go file +var cfg *config.Config // NOTE: must be reset for each _test.go file func TestMain(m *testing.M) { - config = cfg.ResetTestRoot("consensus_height_vote_set_test") + cfg = config.ResetTestRoot("consensus_height_vote_set_test") code := m.Run() - os.RemoveAll(config.RootDir) + os.RemoveAll(cfg.RootDir) os.Exit(code) } func TestPeerCatchupRounds(t *testing.T) { valSet, privVals := factory.RandValidatorSet(10, 1) - hvs := NewHeightVoteSet(config.ChainID(), 1, valSet) + hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet) vote999_0 := makeVoteHR(t, 1, 0, 999, privVals) added, err := hvs.AddVote(vote999_0, "peer1") @@ -75,7 +75,7 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty Type: tmproto.PrecommitType, BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}}, } - chainID := config.ChainID() + chainID := cfg.ChainID() v := vote.ToProto() err = privVal.SignVote(context.Background(), chainID, v) diff --git a/internal/consensus/wal_fuzz.go b/internal/consensus/wal_fuzz.go index e15097c30..06d894a81 100644 --- a/internal/consensus/wal_fuzz.go +++ b/internal/consensus/wal_fuzz.go @@ -1,3 +1,4 @@ +//go:build gofuzz // 
+build gofuzz package consensus diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 81c2125ca..f81234f97 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -11,15 +11,16 @@ import ( "time" "github.com/stretchr/testify/require" - db "github.com/tendermint/tm-db" + dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) @@ -29,9 +30,9 @@ import ( // (byteBufferWAL) and waits until numBlocks are created. // If the node fails to produce given numBlocks, it returns an error. func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { - config := getConfig(t) + cfg := getConfig(t) - app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator")) + app := kvstore.NewPersistentKVStoreApplication(filepath.Join(cfg.DBDir(), "wal_generator")) t.Cleanup(func() { require.NoError(t, app.Close()) }) logger := log.TestingLogger().With("wal_generator", "wal_generator") @@ -40,17 +41,17 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS // NOTE: we can't import node package because of circular dependency. // NOTE: we don't do handshake so need to set state.Version.Consensus.App directly. 
- privValidatorKeyFile := config.PrivValidator.KeyFile() - privValidatorStateFile := config.PrivValidator.StateFile() + privValidatorKeyFile := cfg.PrivValidator.KeyFile() + privValidatorStateFile := cfg.PrivValidator.StateFile() privValidator, err := privval.LoadOrGenFilePV(privValidatorKeyFile, privValidatorStateFile) if err != nil { return err } - genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) if err != nil { return fmt.Errorf("failed to read genesis file: %w", err) } - blockStoreDB := db.NewMemDB() + blockStoreDB := dbm.NewMemDB() stateDB := blockStoreDB stateStore := sm.NewStore(stateDB) state, err := sm.MakeGenesisState(genDoc) @@ -64,7 +65,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { blockStore := store.NewBlockStore(blockStoreDB) - proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app)) + proxyApp := proxy.NewAppConns(abciclient.NewLocalCreator(app), proxy.NopMetrics()) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { return fmt.Errorf("failed to start proxy app connections: %w", err) } @@ -88,7 +89,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { mempool := emptyMempool{} evpool := sm.EmptyEvidencePool{} blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) - consensusState := NewState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) + consensusState := NewState(cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetLogger(logger) consensusState.SetEventBus(eventBus) if privValidator != nil && privValidator != (*privval.FilePV)(nil) { @@ -144,22 +145,22 @@ func randPort() int { return base + mrand.Intn(spread) } -func makeAddrs() (string, string, string) { +// makeAddrs constructs local TCP addresses for node services. +// It uses consecutive ports from a random starting point, so that concurrent +// instances are less likely to collide.
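+// Only two addresses (p2p and RPC) are returned, since the test config no +// longer sets a separate gRPC listen address.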
+func makeAddrs() (p2pAddr, rpcAddr string) { + const addrTemplate = "tcp://127.0.0.1:%d" start := randPort() - return fmt.Sprintf("tcp://127.0.0.1:%d", start), - fmt.Sprintf("tcp://127.0.0.1:%d", start+1), - fmt.Sprintf("tcp://127.0.0.1:%d", start+2) + return fmt.Sprintf(addrTemplate, start), fmt.Sprintf(addrTemplate, start+1) } // getConfig returns a config for test cases -func getConfig(t *testing.T) *cfg.Config { - c := cfg.ResetTestRoot(t.Name()) +func getConfig(t *testing.T) *config.Config { + c := config.ResetTestRoot(t.Name()) - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc - c.RPC.GRPCListenAddress = grpc + p2pAddr, rpcAddr := makeAddrs() + c.P2P.ListenAddress = p2pAddr + c.RPC.ListenAddress = rpcAddr return c } diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index 8ca97fd17..f342dec4c 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -14,9 +14,9 @@ import ( dbm "github.com/tendermint/tm-db" clist "github.com/tendermint/tendermint/internal/libs/clist" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -516,10 +516,13 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) { // Check the height of the conflicting votes and fetch the corresponding time and validator set // to produce the valid evidence - var dve *types.DuplicateVoteEvidence + var ( + dve *types.DuplicateVoteEvidence + err error + ) switch { case voteSet.VoteA.Height == state.LastBlockHeight: - dve = types.NewDuplicateVoteEvidence( + dve, err = types.NewDuplicateVoteEvidence( voteSet.VoteA, voteSet.VoteB, state.LastBlockTime, ) case voteSet.VoteA.Height < state.LastBlockHeight: - valSet, err := evpool.stateDB.LoadValidators(voteSet.VoteA.Height) - if err != nil { + valSet, dbErr := evpool.stateDB.LoadValidators(voteSet.VoteA.Height) + if dbErr != nil { - evpool.logger.Error("failed to load validator set for conflicting votes", "height", voteSet.VoteA.Height, "err", err) + evpool.logger.Error("failed to load validator set for conflicting votes", "height", voteSet.VoteA.Height, "err", dbErr) continue @@ -538,7 +541,7 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) { evpool.logger.Error("failed to load block time for conflicting votes", "height", voteSet.VoteA.Height) continue } - dve = types.NewDuplicateVoteEvidence( + dve, err = types.NewDuplicateVoteEvidence( voteSet.VoteA, voteSet.VoteB, blockMeta.Header.Time, @@ -554,6 +557,10 @@ "state.LastBlockHeight", state.LastBlockHeight) continue } + if err != nil { + evpool.logger.Error("error in generating evidence from votes", "err", err) + continue + } // check if we already have this evidence if evpool.isPending(dve) { @@ -608,7 +615,7 @@ func prefixToBytes(prefix int64) []byte { } func keyCommitted(evidence types.Evidence) []byte { - var height int64 = evidence.Height() + height := evidence.Height() key, err := orderedcode.Append(nil, prefixCommitted, height, string(evidence.Hash())) if err != nil { panic(err) } @@ -617,7 +624,7 @@ func keyPending(evidence types.Evidence) []byte { - var height int64 = evidence.Height() + height := evidence.Height() key, err := orderedcode.Append(nil, prefixPending, height, string(evidence.Hash())) if err != nil {
panic(err) diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index ac5f27b8e..f38c09e02 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -13,12 +13,12 @@ import ( "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" + sm "github.com/tendermint/tendermint/internal/state" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" + sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - sm "github.com/tendermint/tendermint/state" - smmocks "github.com/tendermint/tendermint/state/mocks" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index a454038fd..c2f25bd36 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -15,29 +15,7 @@ import ( "github.com/tendermint/tendermint/types" ) -var ( - _ service.Service = (*Reactor)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - EvidenceChannel: { - MsgType: new(tmproto.EvidenceList), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(EvidenceChannel), - Priority: 6, - RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, - MaxSendBytes: 400, - }, - }, - } -) +var _ service.Service = (*Reactor)(nil) const ( EvidenceChannel = p2p.ChannelID(0x38) @@ -51,6 +29,18 @@ const ( broadcastEvidenceIntervalS = 10 ) +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptor() *p2p.ChannelDescriptor { + return &p2p.ChannelDescriptor{ + ID: EvidenceChannel, + MessageType: new(tmproto.EvidenceList), + Priority: 6, + RecvMessageCapacity: maxMsgSize, + RecvBufferCapacity: 32, + } +} + // Reactor handles evpool evidence broadcasting amongst peers. 
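+// Pending evidence is periodically rebroadcast to peers +// (every broadcastEvidenceIntervalS seconds).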
type Reactor struct { service.BaseService diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index b098eb373..cf8f840ea 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -20,9 +20,9 @@ import ( "github.com/tendermint/tendermint/internal/evidence/mocks" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -62,11 +62,8 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores), } - chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)} - rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, - chDesc, - new(tmproto.EvidenceList), - int(chBuf)) + chDesc := &p2p.ChannelDescriptor{ID: evidence.EvidenceChannel, MessageType: new(tmproto.EvidenceList)} + rts.evidenceChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) require.Len(t, rts.network.RandomNode().PeerManager.Peers(), 0) idx := 0 @@ -534,12 +531,13 @@ func TestEvidenceListSerialization(t *testing.T) { valSet := types.NewValidatorSet([]*types.Validator{val}) - dupl := types.NewDuplicateVoteEvidence( + dupl, err := types.NewDuplicateVoteEvidence( exampleVote(1), exampleVote(2), defaultEvidenceTime, valSet, ) + require.NoError(t, err) testCases := map[string]struct { evidenceList []types.Evidence diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index 85f997f2a..df1642f82 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -8,18 +8,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/evidence/mocks" + sm "github.com/tendermint/tendermint/internal/state" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" - smmocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/types" ) diff --git a/internal/inspect/doc.go b/internal/inspect/doc.go new file mode 100644 index 000000000..c53049e1a --- /dev/null +++ b/internal/inspect/doc.go @@ -0,0 +1,36 @@ +/* +Package inspect provides a tool for investigating the state of a +failed Tendermint node. + +This package provides the Inspector type. The Inspector type runs a subset of the Tendermint +RPC endpoints that are useful for debugging issues with Tendermint consensus. + +When a node running the Tendermint consensus engine detects an inconsistent consensus state, +the entire node will crash. The Tendermint consensus engine cannot run in this +inconsistent state so the node will not be able to start up again. + +The RPC endpoints provided by the Inspector type allow for a node operator to inspect +the block store and state store to better understand what may have caused the inconsistent state. 
+ + +The Inspector type's lifecycle is controlled by a context.Context + ins, err := inspect.NewFromConfig(logger, cfg) + ctx, cancelFunc := context.WithCancel(context.Background()) + + // Run blocks until the Inspector server is shut down. + go ins.Run(ctx) + ... + + // calling the cancel function will stop the running inspect server + cancelFunc() + +Inspector serves its RPC endpoints on the address configured in the RPC configuration + + cfg.RPC.ListenAddress = "tcp://127.0.0.1:26657" + ins, err := inspect.NewFromConfig(logger, cfg) + go ins.Run(ctx) + +The list of available RPC endpoints can then be viewed by navigating to +http://127.0.0.1:26657/ in the web browser. +*/ +package inspect diff --git a/internal/inspect/inspect.go b/internal/inspect/inspect.go new file mode 100644 index 000000000..90e615341 --- /dev/null +++ b/internal/inspect/inspect.go @@ -0,0 +1,148 @@ +package inspect + +import ( + "context" + "errors" + "fmt" + "net" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/inspect/rpc" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer/sink" + "github.com/tendermint/tendermint/internal/store" + "github.com/tendermint/tendermint/libs/log" + tmstrings "github.com/tendermint/tendermint/libs/strings" + "github.com/tendermint/tendermint/types" + + "golang.org/x/sync/errgroup" +) + +// Inspector manages an RPC service that exports methods to debug a failed node. +// After a node shuts down due to a consensus failure, it will no longer start +// up, and its state cannot easily be inspected. An Inspector value provides a similar interface +// to the node, using the underlying Tendermint data stores, without bringing up +// any other components. A caller can query the Inspector service to inspect the +// persisted state and debug the failure. +type Inspector struct { + routes rpccore.RoutesMap + + config *config.RPCConfig + + indexerService *indexer.Service + eventBus *types.EventBus + logger log.Logger +} + +// New returns an Inspector that serves RPC on the specified BlockStore and StateStore. +// The Inspector type does not modify the state or block stores. +// The sinks are used to enable block and transaction querying via the RPC server. +// The caller is responsible for starting and stopping the Inspector service. +// +//nolint:lll +func New(cfg *config.RPCConfig, bs state.BlockStore, ss state.Store, es []indexer.EventSink, logger log.Logger) *Inspector { + routes := rpc.Routes(*cfg, ss, bs, es, logger) + eb := types.NewEventBus() + eb.SetLogger(logger.With("module", "events")) + is := indexer.NewIndexerService(es, eb) + is.SetLogger(logger.With("module", "txindex")) + return &Inspector{ + routes: routes, + config: cfg, + logger: logger, + eventBus: eb, + indexerService: is, + } +} + +// NewFromConfig constructs an Inspector using the values defined in the passed-in config.
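+// NewFromConfig opens the block store, state store, and event sinks through +// config.DefaultDBProvider and the genesis document referenced by cfg, so cfg +// is expected to describe an existing node home directory.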
+func NewFromConfig(logger log.Logger, cfg *config.Config) (*Inspector, error) { + bsDB, err := config.DefaultDBProvider(&config.DBContext{ID: "blockstore", Config: cfg}) + if err != nil { + return nil, err + } + bs := store.NewBlockStore(bsDB) + sDB, err := config.DefaultDBProvider(&config.DBContext{ID: "state", Config: cfg}) + if err != nil { + return nil, err + } + genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) + if err != nil { + return nil, err + } + sinks, err := sink.EventSinksFromConfig(cfg, config.DefaultDBProvider, genDoc.ChainID) + if err != nil { + return nil, err + } + ss := state.NewStore(sDB) + return New(cfg.RPC, bs, ss, sinks, logger), nil +} + +// Run starts the Inspector servers and blocks until the servers shut down. The passed +// in context is used to control the lifecycle of the servers. +func (ins *Inspector) Run(ctx context.Context) error { + err := ins.eventBus.Start() + if err != nil { + return fmt.Errorf("error starting event bus: %s", err) + } + defer func() { + err := ins.eventBus.Stop() + if err != nil { + ins.logger.Error("event bus stopped with error", "err", err) + } + }() + err = ins.indexerService.Start() + if err != nil { + return fmt.Errorf("error starting indexer service: %s", err) + } + defer func() { + err := ins.indexerService.Stop() + if err != nil { + ins.logger.Error("indexer service stopped with error", "err", err) + } + }() + return startRPCServers(ctx, ins.config, ins.logger, ins.routes) +} + +func startRPCServers(ctx context.Context, cfg *config.RPCConfig, logger log.Logger, routes rpccore.RoutesMap) error { + g, tctx := errgroup.WithContext(ctx) + listenAddrs := tmstrings.SplitAndTrimEmpty(cfg.ListenAddress, ",", " ") + rh := rpc.Handler(cfg, routes, logger) + for _, listenerAddr := range listenAddrs { + server := rpc.Server{ + Logger: logger, + Config: cfg, + Handler: rh, + Addr: listenerAddr, + } + if cfg.IsTLSEnabled() { + keyFile := cfg.KeyFile() + certFile := cfg.CertFile() + listenerAddr := listenerAddr + g.Go(func() error { + logger.Info("RPC HTTPS server starting", "address", listenerAddr, + "certfile", certFile, "keyfile", keyFile) + err := server.ListenAndServeTLS(tctx, certFile, keyFile) + if !errors.Is(err, net.ErrClosed) { + return err + } + logger.Info("RPC HTTPS server stopped", "address", listenerAddr) + return nil + }) + } else { + listenerAddr := listenerAddr + g.Go(func() error { + logger.Info("RPC HTTP server starting", "address", listenerAddr) + err := server.ListenAndServe(tctx) + if !errors.Is(err, net.ErrClosed) { + return err + } + logger.Info("RPC HTTP server stopped", "address", listenerAddr) + return nil + }) + } + } + return g.Wait() +} diff --git a/internal/inspect/inspect_test.go b/internal/inspect/inspect_test.go new file mode 100644 index 000000000..972932440 --- /dev/null +++ b/internal/inspect/inspect_test.go @@ -0,0 +1,587 @@ +package inspect_test + +import ( + "context" + "fmt" + "net" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + abcitypes "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/inspect" + "github.com/tendermint/tendermint/internal/state/indexer" + indexermocks "github.com/tendermint/tendermint/internal/state/indexer/mocks" + statemocks "github.com/tendermint/tendermint/internal/state/mocks" + "github.com/tendermint/tendermint/libs/log" + 
"github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/proto/tendermint/state" + httpclient "github.com/tendermint/tendermint/rpc/client/http" + "github.com/tendermint/tendermint/types" +) + +func TestInspectConstructor(t *testing.T) { + cfg := config.ResetTestRoot("test") + testLogger := log.TestingLogger() + t.Cleanup(leaktest.Check(t)) + defer func() { _ = os.RemoveAll(cfg.RootDir) }() + t.Run("from config", func(t *testing.T) { + logger := testLogger.With(t.Name()) + d, err := inspect.NewFromConfig(logger, cfg) + require.NoError(t, err) + require.NotNil(t, d) + }) + +} + +func TestInspectRun(t *testing.T) { + cfg := config.ResetTestRoot("test") + testLogger := log.TestingLogger() + t.Cleanup(leaktest.Check(t)) + defer func() { _ = os.RemoveAll(cfg.RootDir) }() + t.Run("from config", func(t *testing.T) { + logger := testLogger.With(t.Name()) + d, err := inspect.NewFromConfig(logger, cfg) + require.NoError(t, err) + ctx, cancel := context.WithCancel(context.Background()) + stoppedWG := &sync.WaitGroup{} + stoppedWG.Add(1) + go func() { + require.NoError(t, d.Run(ctx)) + stoppedWG.Done() + }() + cancel() + stoppedWG.Wait() + }) + +} + +func TestBlock(t *testing.T) { + testHeight := int64(1) + testBlock := new(types.Block) + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = []byte("test hash") + stateStoreMock := &statemocks.Store{} + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}) + blockStoreMock.On("LoadBlock", testHeight).Return(testBlock) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + resultBlock, err := cli.Block(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, testBlock.Height, resultBlock.Block.Height) + require.Equal(t, testBlock.LastCommitHash, resultBlock.Block.LastCommitHash) + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestTxSearch(t *testing.T) { + testHash := []byte("test") + testTx := []byte("tx") + testQuery := fmt.Sprintf("tx.hash='%s'", string(testHash)) + testTxResult := &abcitypes.TxResult{ + Height: 1, + Index: 100, + Tx: testTx, + } + + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.KV) + eventSinkMock.On("SearchTxEvents", mock.Anything, + mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). 
+ Return([]*abcitypes.TxResult{testTxResult}, nil) + + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + + var page = 1 + resultTxSearch, err := cli.TxSearch(context.Background(), testQuery, false, &page, &page, "") + require.NoError(t, err) + require.Len(t, resultTxSearch.Txs, 1) + require.Equal(t, types.Tx(testTx), resultTxSearch.Txs[0].Tx) + + cancel() + wg.Wait() + + eventSinkMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) + blockStoreMock.AssertExpectations(t) +} +func TestTx(t *testing.T) { + testHash := []byte("test") + testTx := []byte("tx") + + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.KV) + eventSinkMock.On("GetTxByHash", testHash).Return(&abcitypes.TxResult{ + Tx: testTx, + }, nil) + + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + + res, err := cli.Tx(context.Background(), testHash, false) + require.NoError(t, err) + require.Equal(t, types.Tx(testTx), res.Tx) + + cancel() + wg.Wait() + + eventSinkMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) + blockStoreMock.AssertExpectations(t) +} +func TestConsensusParams(t *testing.T) { + testHeight := int64(1) + testMaxGas := int64(55) + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + stateStoreMock.On("LoadConsensusParams", testHeight).Return(types.ConsensusParams{ + Block: types.BlockParams{ + MaxGas: testMaxGas, + }, + }, nil) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + params, err := cli.ConsensusParams(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, params.ConsensusParams.Block.MaxGas, testMaxGas) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockResults(t *testing.T) { + testHeight := int64(1) + testGasUsed := int64(100) + stateStoreMock := &statemocks.Store{} + // tmstate "github.com/tendermint/tendermint/proto/tendermint/state" + stateStoreMock.On("LoadABCIResponses", testHeight).Return(&state.ABCIResponses{ + DeliverTxs: []*abcitypes.ResponseDeliverTx{ + { + GasUsed: testGasUsed, + }, + }, + EndBlock: &abcitypes.ResponseEndBlock{}, + BeginBlock: &abcitypes.ResponseBeginBlock{}, + }, nil) + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("Height").Return(testHeight) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + res, err := cli.BlockResults(context.Background(), &testHeight) + require.NoError(t, err) + require.Equal(t, res.TotalGasUsed, testGasUsed) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestCommit(t *testing.T) { + testHeight := int64(1) + testRound := int32(101) + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{}, nil) + blockStoreMock.On("LoadSeenCommit").Return(&types.Commit{ + Height: testHeight, + Round: testRound, + }, nil) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + res, err := cli.Commit(context.Background(), &testHeight) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, res.SignedHeader.Commit.Round, testRound) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockByHash(t *testing.T) { + testHeight := int64(1) + testHash := []byte("test hash") + testBlock := new(types.Block) + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = testHash + stateStoreMock := &statemocks.Store{} + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testHash, + }, + Header: types.Header{ + Height: testHeight, + }, + }, nil) + blockStoreMock.On("LoadBlockByHash", testHash).Return(testBlock, nil) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + res, err := cli.BlockByHash(context.Background(), testHash) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, []byte(res.BlockID.Hash), testHash) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockchain(t *testing.T) { + testHeight := int64(1) + testBlock := new(types.Block) + testBlockHash := []byte("test hash") + testBlock.Header.Height = testHeight + testBlock.Header.LastCommitHash = testBlockHash + stateStoreMock := &statemocks.Store{} + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testBlockHash, + }, + }) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + res, err := cli.BlockchainInfo(context.Background(), 0, 100) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testBlockHash, []byte(res.BlockMetas[0].BlockID.Hash)) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestValidators(t *testing.T) { + testHeight := int64(1) + testVotingPower := int64(100) + testValidators := types.ValidatorSet{ + Validators: []*types.Validator{ + { + VotingPower: testVotingPower, + }, + }, + } + stateStoreMock := &statemocks.Store{} + stateStoreMock.On("LoadValidators", testHeight).Return(&testValidators, nil) + + blockStoreMock := &statemocks.BlockStore{} + blockStoreMock.On("Height").Return(testHeight) + blockStoreMock.On("Base").Return(int64(0)) + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. + // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + + testPage := 1 + testPerPage := 100 + res, err := cli.Validators(context.Background(), &testHeight, &testPage, &testPerPage) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testVotingPower, res.Validators[0].VotingPower) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func TestBlockSearch(t *testing.T) { + testHeight := int64(1) + testBlockHash := []byte("test hash") + testQuery := "block.height = 1" + stateStoreMock := &statemocks.Store{} + + blockStoreMock := &statemocks.BlockStore{} + eventSinkMock := &indexermocks.EventSink{} + eventSinkMock.On("Stop").Return(nil) + eventSinkMock.On("Type").Return(indexer.KV) + blockStoreMock.On("LoadBlock", testHeight).Return(&types.Block{ + Header: types.Header{ + Height: testHeight, + }, + }, nil) + blockStoreMock.On("LoadBlockMeta", testHeight).Return(&types.BlockMeta{ + BlockID: types.BlockID{ + Hash: testBlockHash, + }, + }) + eventSinkMock.On("SearchBlockEvents", mock.Anything, + mock.MatchedBy(func(q *query.Query) bool { return testQuery == q.String() })). + Return([]int64{testHeight}, nil) + rpcConfig := config.TestRPCConfig() + l := log.TestingLogger() + d := inspect.New(rpcConfig, blockStoreMock, stateStoreMock, []indexer.EventSink{eventSinkMock}, l) + + ctx, cancel := context.WithCancel(context.Background()) + wg := &sync.WaitGroup{} + wg.Add(1) + + startedWG := &sync.WaitGroup{} + startedWG.Add(1) + go func() { + startedWG.Done() + defer wg.Done() + require.NoError(t, d.Run(ctx)) + }() + // FIXME: used to induce context switch. 
+ // Determine more deterministic method for prompting a context switch + startedWG.Wait() + requireConnect(t, rpcConfig.ListenAddress, 20) + cli, err := httpclient.New(rpcConfig.ListenAddress) + require.NoError(t, err) + + testPage := 1 + testPerPage := 100 + testOrderBy := "desc" + res, err := cli.BlockSearch(context.Background(), testQuery, &testPage, &testPerPage, testOrderBy) + require.NoError(t, err) + require.NotNil(t, res) + require.Equal(t, testBlockHash, []byte(res.Blocks[0].BlockID.Hash)) + + cancel() + wg.Wait() + + blockStoreMock.AssertExpectations(t) + stateStoreMock.AssertExpectations(t) +} + +func requireConnect(t testing.TB, addr string, retries int) { + parts := strings.SplitN(addr, "://", 2) + if len(parts) != 2 { + t.Fatalf("malformed address to dial: %s", addr) + } + var err error + for i := 0; i < retries; i++ { + var conn net.Conn + conn, err = net.Dial(parts[0], parts[1]) + if err == nil { + conn.Close() + return + } + // FIXME attempt to yield and let the other goroutine continue execution. + time.Sleep(time.Microsecond * 100) + } + t.Fatalf("unable to connect to server %s after %d tries: %s", addr, retries, err) +} diff --git a/internal/inspect/rpc/rpc.go b/internal/inspect/rpc/rpc.go new file mode 100644 index 000000000..3043ba6b3 --- /dev/null +++ b/internal/inspect/rpc/rpc.go @@ -0,0 +1,143 @@ +package rpc + +import ( + "context" + "net/http" + "time" + + "github.com/rs/cors" + + "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/rpc/core" + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/rpc/jsonrpc/server" + "github.com/tendermint/tendermint/types" +) + +// Server defines parameters for running an Inspector rpc server. +type Server struct { + Addr string // TCP address to listen on, ":http" if empty + Handler http.Handler + Logger log.Logger + Config *config.RPCConfig +} + +// Routes returns the set of routes used by the Inspector server. +// +//nolint: lll +func Routes(cfg config.RPCConfig, s state.Store, bs state.BlockStore, es []indexer.EventSink, logger log.Logger) core.RoutesMap { + env := &core.Environment{ + Config: cfg, + EventSinks: es, + StateStore: s, + BlockStore: bs, + ConsensusReactor: waitSyncCheckerImpl{}, + Logger: logger, + } + return core.RoutesMap{ + "blockchain": server.NewRPCFunc(env.BlockchainInfo, "minHeight,maxHeight", true), + "consensus_params": server.NewRPCFunc(env.ConsensusParams, "height", true), + "block": server.NewRPCFunc(env.Block, "height", true), + "block_by_hash": server.NewRPCFunc(env.BlockByHash, "hash", true), + "block_results": server.NewRPCFunc(env.BlockResults, "height", true), + "commit": server.NewRPCFunc(env.Commit, "height", true), + "validators": server.NewRPCFunc(env.Validators, "height,page,per_page", true), + "tx": server.NewRPCFunc(env.Tx, "hash,prove", true), + "tx_search": server.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false), + "block_search": server.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false), + } +} + +// Handler returns the http.Handler configured for use with an Inspector server. Handler +// registers the routes on the http.Handler and also registers the websocket handler +// and the CORS handler if specified by the configuration options. 
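+// +// The websocket manager is registered at the /websocket path, and the CORS +// wrapper is applied only when rpcConfig.IsCorsEnabled() reports true.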
+func Handler(rpcConfig *config.RPCConfig, routes core.RoutesMap, logger log.Logger) http.Handler { + mux := http.NewServeMux() + wmLogger := logger.With("protocol", "websocket") + + var eventBus types.EventBusSubscriber + + websocketDisconnectFn := func(remoteAddr string) { + err := eventBus.UnsubscribeAll(context.Background(), remoteAddr) + if err != nil && err != pubsub.ErrSubscriptionNotFound { + wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) + } + } + wm := server.NewWebsocketManager(routes, + server.OnDisconnect(websocketDisconnectFn), + server.ReadLimit(rpcConfig.MaxBodyBytes)) + wm.SetLogger(wmLogger) + mux.HandleFunc("/websocket", wm.WebsocketHandler) + + server.RegisterRPCFuncs(mux, routes, logger) + var rootHandler http.Handler = mux + if rpcConfig.IsCorsEnabled() { + rootHandler = addCORSHandler(rpcConfig, mux) + } + return rootHandler +} + +func addCORSHandler(rpcConfig *config.RPCConfig, h http.Handler) http.Handler { + corsMiddleware := cors.New(cors.Options{ + AllowedOrigins: rpcConfig.CORSAllowedOrigins, + AllowedMethods: rpcConfig.CORSAllowedMethods, + AllowedHeaders: rpcConfig.CORSAllowedHeaders, + }) + h = corsMiddleware.Handler(h) + return h +} + +type waitSyncCheckerImpl struct{} + +func (waitSyncCheckerImpl) WaitSync() bool { + return false +} + +func (waitSyncCheckerImpl) GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) { + return nil, false +} + +// ListenAndServe listens on the address specified in srv.Addr and handles any +// incoming requests over HTTP using the Inspector rpc handler specified on the server. +func (srv *Server) ListenAndServe(ctx context.Context) error { + listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections) + if err != nil { + return err + } + go func() { + <-ctx.Done() + listener.Close() + }() + return server.Serve(listener, srv.Handler, srv.Logger, serverRPCConfig(srv.Config)) +} + +// ListenAndServeTLS listens on the address specified in srv.Addr. ListenAndServeTLS handles +// incoming requests over HTTPS using the Inspector rpc handler specified on the server. +func (srv *Server) ListenAndServeTLS(ctx context.Context, certFile, keyFile string) error { + listener, err := server.Listen(srv.Addr, srv.Config.MaxOpenConnections) + if err != nil { + return err + } + go func() { + <-ctx.Done() + listener.Close() + }() + return server.ServeTLS(listener, srv.Handler, certFile, keyFile, srv.Logger, serverRPCConfig(srv.Config)) +} + +func serverRPCConfig(r *config.RPCConfig) *server.Config { + cfg := server.DefaultConfig() + cfg.MaxBodyBytes = r.MaxBodyBytes + cfg.MaxHeaderBytes = r.MaxHeaderBytes + // If necessary adjust global WriteTimeout to ensure it's greater than + // TimeoutBroadcastTxCommit. + // See https://github.com/tendermint/tendermint/issues/3435 + if cfg.WriteTimeout <= r.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = r.TimeoutBroadcastTxCommit + 1*time.Second + } + return cfg +} diff --git a/internal/libs/clist/clist_property_test.go b/internal/libs/clist/clist_property_test.go new file mode 100644 index 000000000..cdc173ee5 --- /dev/null +++ b/internal/libs/clist/clist_property_test.go @@ -0,0 +1,72 @@ +package clist_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + "pgregory.net/rapid" + + "github.com/tendermint/tendermint/internal/libs/clist" +) + +func TestCListProperties(t *testing.T) { + rapid.Check(t, rapid.Run(&clistModel{})) +} + +// clistModel is used by the rapid state machine testing framework. 
+// clistModel contains both the clist that is being tested and a slice of *clist.CElements +// that will be used to model the expected clist behavior. +type clistModel struct { + clist *clist.CList + + model []*clist.CElement +} + +// Init is a method used by the rapid state machine testing library. +// Init is called when the test starts to initialize the data that will be used +// in the state machine test. +func (m *clistModel) Init(t *rapid.T) { + m.clist = clist.New() + m.model = []*clist.CElement{} +} + +// PushBack defines an action that may be randomly selected by the rapid state +// machine testing library. Every call to PushBack calls PushBack on the clist and +// performs a similar action on the model data. +func (m *clistModel) PushBack(t *rapid.T) { + value := rapid.String().Draw(t, "value").(string) + el := m.clist.PushBack(value) + m.model = append(m.model, el) +} + +// Remove defines an action that may be randomly selected by the rapid state +// machine testing library. Every call to Remove selects an element from the model +// and calls Remove on the CList with that element. The same element is removed from +// the model to keep the objects in sync. +func (m *clistModel) Remove(t *rapid.T) { + if len(m.model) == 0 { + return + } + ix := rapid.IntRange(0, len(m.model)-1).Draw(t, "index").(int) + value := m.model[ix] + m.model = append(m.model[:ix], m.model[ix+1:]...) + m.clist.Remove(value) +} + +// Check is a method required by the rapid state machine testing library. +// Check is run after each action and is used to verify that the state of the object, +// in this case a clist.CList, matches the state of the model. +func (m *clistModel) Check(t *rapid.T) { + require.Equal(t, len(m.model), m.clist.Len()) + if len(m.model) == 0 { + return + } + require.Equal(t, m.model[0], m.clist.Front()) + require.Equal(t, m.model[len(m.model)-1], m.clist.Back()) + + iter := m.clist.Front() + for _, val := range m.model { + require.Equal(t, val, iter) + iter = iter.Next() + } +} diff --git a/internal/libs/protoio/io_test.go b/internal/libs/protoio/io_test.go index 2f1437c68..a84b34c00 100644 --- a/internal/libs/protoio/io_test.go +++ b/internal/libs/protoio/io_test.go @@ -71,7 +71,7 @@ func iotest(writer protoio.WriteCloser, reader protoio.ReadCloser) error { return err } if n != len(bz)+visize { - return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize) // nolint + return fmt.Errorf("WriteMsg() wrote %v bytes, expected %v", n, len(bz)+visize) } lens[i] = n } diff --git a/internal/libs/sync/deadlock.go b/internal/libs/sync/deadlock.go index 637d6fbb1..21b5130ba 100644 --- a/internal/libs/sync/deadlock.go +++ b/internal/libs/sync/deadlock.go @@ -1,3 +1,4 @@ +//go:build deadlock // +build deadlock package sync diff --git a/internal/libs/sync/sync.go b/internal/libs/sync/sync.go index a0880e7de..c6e7101c6 100644 --- a/internal/libs/sync/sync.go +++ b/internal/libs/sync/sync.go @@ -1,3 +1,4 @@ +//go:build !deadlock // +build !deadlock package sync diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index 43174f106..3cd45d2bc 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -31,14 +31,14 @@ var _ TxCache = (*LRUTxCache)(nil) type LRUTxCache struct { mtx tmsync.Mutex size int - cacheMap map[[TxKeySize]byte]*list.Element + cacheMap map[types.TxKey]*list.Element list *list.List } func NewLRUTxCache(cacheSize int) *LRUTxCache { return &LRUTxCache{ size: cacheSize, - cacheMap: make(map[[TxKeySize]byte]*list.Element,
cacheSize), + cacheMap: make(map[types.TxKey]*list.Element, cacheSize), list: list.New(), } } @@ -53,7 +53,7 @@ func (c *LRUTxCache) Reset() { c.mtx.Lock() defer c.mtx.Unlock() - c.cacheMap = make(map[[TxKeySize]byte]*list.Element, c.size) + c.cacheMap = make(map[types.TxKey]*list.Element, c.size) c.list.Init() } @@ -61,7 +61,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { c.mtx.Lock() defer c.mtx.Unlock() - key := TxKey(tx) + key := tx.Key() moved, ok := c.cacheMap[key] if ok { @@ -72,7 +72,7 @@ func (c *LRUTxCache) Push(tx types.Tx) bool { if c.list.Len() >= c.size { front := c.list.Front() if front != nil { - frontKey := front.Value.([TxKeySize]byte) + frontKey := front.Value.(types.TxKey) delete(c.cacheMap, frontKey) c.list.Remove(front) } @@ -88,7 +88,7 @@ func (c *LRUTxCache) Remove(tx types.Tx) { c.mtx.Lock() defer c.mtx.Unlock() - key := TxKey(tx) + key := tx.Key() e := c.cacheMap[key] delete(c.cacheMap, key) diff --git a/internal/mempool/ids.go b/internal/mempool/ids.go index 49a9ac607..656f5b74c 100644 --- a/internal/mempool/ids.go +++ b/internal/mempool/ids.go @@ -7,17 +7,15 @@ import ( "github.com/tendermint/tendermint/types" ) -// nolint: golint -// TODO: Rename type. -type MempoolIDs struct { +type IDs struct { mtx tmsync.RWMutex peerMap map[types.NodeID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used } -func NewMempoolIDs() *MempoolIDs { - return &MempoolIDs{ +func NewMempoolIDs() *IDs { + return &IDs{ peerMap: make(map[types.NodeID]uint16), // reserve UnknownPeerID for mempoolReactor.BroadcastTx @@ -28,7 +26,7 @@ func NewMempoolIDs() *MempoolIDs { // ReserveForPeer searches for the next unused ID and assigns it to the provided // peer. -func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { +func (ids *IDs) ReserveForPeer(peerID types.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -38,7 +36,7 @@ func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { } // Reclaim returns the ID reserved for the peer back to unused pool. -func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { +func (ids *IDs) Reclaim(peerID types.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -50,7 +48,7 @@ func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { } // GetForPeer returns an ID reserved for the peer. -func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { +func (ids *IDs) GetForPeer(peerID types.NodeID) uint16 { ids.mtx.RLock() defer ids.mtx.RUnlock() @@ -59,7 +57,7 @@ func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { // nextPeerID returns the next unused peer ID to use. We assume that the mutex // is already held. -func (ids *MempoolIDs) nextPeerID() uint16 { +func (ids *IDs) nextPeerID() uint16 { if len(ids.activeIDs) == MaxActiveIDs { panic(fmt.Sprintf("node has maximum %d active IDs and wanted to get one more", MaxActiveIDs)) } diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index d679b3506..6e3955dc3 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -32,6 +32,10 @@ type Mempool interface { // its validity and whether it should be added to the mempool. CheckTx(ctx context.Context, tx types.Tx, callback func(*abci.Response), txInfo TxInfo) error + // RemoveTxByKey removes a transaction, identified by its key, + // from the mempool. 
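The LRUTxCache hunks above keep the classic map-plus-list LRU shape while swapping the key type to `types.TxKey`. A self-contained sketch of that pattern (local `txKey` and `lruSketch` names stand in for the real types):

```go
package sketch

import "container/list"

// txKey stands in for types.TxKey so the sketch is self-contained.
type txKey [32]byte

// lruSketch mirrors the map-plus-list shape of LRUTxCache: the map gives
// O(1) lookup and the list orders keys oldest-first for eviction.
type lruSketch struct {
	size     int
	cacheMap map[txKey]*list.Element
	queue    *list.List
}

func newLRUSketch(size int) *lruSketch {
	return &lruSketch{
		size:     size,
		cacheMap: make(map[txKey]*list.Element, size),
		queue:    list.New(),
	}
}

// push reports whether the key was newly inserted; pushing a key that is
// already cached refreshes its position instead, and inserting into a full
// cache evicts the oldest entry at the front of the list.
func (c *lruSketch) push(k txKey) bool {
	if e, ok := c.cacheMap[k]; ok {
		c.queue.MoveToBack(e)
		return false
	}
	if c.queue.Len() >= c.size {
		if front := c.queue.Front(); front != nil {
			delete(c.cacheMap, front.Value.(txKey))
			c.queue.Remove(front)
		}
	}
	c.cacheMap[k] = c.queue.PushBack(k)
	return true
}
```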
+ RemoveTxByKey(txKey types.TxKey) error + // ReapMaxBytesMaxGas reaps transactions from the mempool up to maxBytes // bytes total with the condition that the total gasWanted must be less than // maxGas. diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go index 5749d2d3c..8e6f0c7bf 100644 --- a/internal/mempool/mock/mempool.go +++ b/internal/mempool/mock/mempool.go @@ -5,29 +5,30 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/clist" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) // Mempool is an empty implementation of a Mempool, useful for testing. type Mempool struct{} -var _ mempl.Mempool = Mempool{} +var _ mempool.Mempool = Mempool{} func (Mempool) Lock() {} func (Mempool) Unlock() {} func (Mempool) Size() int { return 0 } -func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { +func (Mempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempool.TxInfo) error { return nil } +func (Mempool) RemoveTxByKey(txKey types.TxKey) error { return nil } func (Mempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } func (Mempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } func (Mempool) Update( _ int64, _ types.Txs, _ []*abci.ResponseDeliverTx, - _ mempl.PreCheckFunc, - _ mempl.PostCheckFunc, + _ mempool.PreCheckFunc, + _ mempool.PostCheckFunc, ) error { return nil } diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index 860d3d3b4..adafdf85e 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -1,24 +1,9 @@ package mempool import ( - "crypto/sha256" - "github.com/tendermint/tendermint/types" ) -// TxKeySize defines the size of the transaction's key used for indexing. -const TxKeySize = sha256.Size - -// TxKey is the fixed length array key used as an index. -func TxKey(tx types.Tx) [TxKeySize]byte { - return sha256.Sum256(tx) -} - -// TxHashFromBytes returns the hash of a transaction from raw bytes. -func TxHashFromBytes(tx []byte) []byte { - return types.Tx(tx).Hash() -} - // TxInfo are parameters that get passed when attempting to add a tx to the // mempool. 
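The deleted helpers in internal/mempool/tx.go move onto the transaction type itself. Given the removed `TxKeySize = sha256.Size` constant and the `tx.Key()` call sites throughout this diff, `types.TxKey` is evidently the same fixed-size sha256 digest; a sketch of the shape, with shadow `Tx`/`TxKey` types for illustration:

```go
package sketch

import "crypto/sha256"

// Tx and TxKey shadow the real types for illustration.
type (
	Tx    []byte
	TxKey [sha256.Size]byte
)

// Key is the method form that replaces the removed package-level helpers:
//
//	mempool.TxKey(tx)           ->  tx.Key()
//	mempool.TxHashFromBytes(bz) ->  types.Tx(bz).Hash()
func (tx Tx) Key() TxKey { return sha256.Sum256(tx) }
```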
type TxInfo struct { diff --git a/internal/mempool/v0/bench_test.go b/internal/mempool/v0/bench_test.go index 45123c9f6..acfaec283 100644 --- a/internal/mempool/v0/bench_test.go +++ b/internal/mempool/v0/bench_test.go @@ -6,14 +6,14 @@ import ( "sync/atomic" "testing" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/proxy" ) func BenchmarkReap(b *testing.B) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() mp.config.Size = 100000 @@ -34,7 +34,7 @@ func BenchmarkReap(b *testing.B) { func BenchmarkCheckTx(b *testing.B) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -56,7 +56,7 @@ func BenchmarkCheckTx(b *testing.B) { func BenchmarkParallelCheckTx(b *testing.B) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -81,7 +81,7 @@ func BenchmarkParallelCheckTx(b *testing.B) { func BenchmarkCheckDuplicateTx(b *testing.B) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go index fbb719231..5bf2c7603 100644 --- a/internal/mempool/v0/cache_test.go +++ b/internal/mempool/v0/cache_test.go @@ -7,16 +7,16 @@ import ( "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) func TestCacheAfterUpdate(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -61,7 +61,7 @@ func TestCacheAfterUpdate(t *testing.T) { require.NotEqual(t, len(tc.txsInCache), counter, "cache larger than expected on testcase %d", tcIndex) - nodeVal := node.Value.([sha256.Size]byte) + nodeVal := node.Value.(types.TxKey) expectedBz := sha256.Sum256([]byte{byte(tc.txsInCache[len(tc.txsInCache)-counter-1])}) // Reference for reading the errors: // >>> sha256('\x00').hexdigest() @@ -71,7 +71,7 @@ func TestCacheAfterUpdate(t *testing.T) { // >>> sha256('\x02').hexdigest() // 'dbc1b4c900ffe48d575b5da5c638040125f65db0fe3e24494b76ea986457d986' - require.Equal(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) + require.EqualValues(t, expectedBz, nodeVal, "Equality failed on index %d, tc %d", counter, tcIndex) counter++ node = node.Next() } diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go index 40e93cc13..7816730c1 100644 --- a/internal/mempool/v0/clist_mempool.go +++ b/internal/mempool/v0/clist_mempool.go @@ -3,19 +3,19 @@ package v0 import ( "bytes" "context" + "errors" "fmt" "sync" "sync/atomic" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" 
"github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" - pubmempool "github.com/tendermint/tendermint/pkg/mempool" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -33,7 +33,7 @@ type CListMempool struct { notifiedTxsAvailable bool txsAvailable chan struct{} // fires once for each height, when the mempool is not empty - config *cfg.MempoolConfig + config *config.MempoolConfig // Exclusive mutex for Update method to prevent concurrent execution of // CheckTx or ReapMaxBytesMaxGas(ReapMaxTxs) methods. @@ -70,14 +70,14 @@ type CListMempoolOption func(*CListMempool) // NewCListMempool returns a new mempool with the given configuration and // connection to an application. func NewCListMempool( - config *cfg.MempoolConfig, + cfg *config.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int64, options ...CListMempoolOption, ) *CListMempool { mp := &CListMempool{ - config: config, + config: cfg, proxyAppConn: proxyAppConn, txs: clist.New(), height: height, @@ -87,8 +87,8 @@ func NewCListMempool( metrics: mempool.NopMetrics(), } - if config.CacheSize > 0 { - mp.cache = mempool.NewLRUTxCache(config.CacheSize) + if cfg.CacheSize > 0 { + mp.cache = mempool.NewLRUTxCache(cfg.CacheSize) } else { mp.cache = mempool.NopTxCache{} } @@ -217,7 +217,7 @@ func (mem *CListMempool) CheckTx( } if txSize > mem.config.MaxTxBytes { - return pubmempool.ErrTxTooLarge{ + return types.ErrTxTooLarge{ Max: mem.config.MaxTxBytes, Actual: txSize, } @@ -225,7 +225,7 @@ func (mem *CListMempool) CheckTx( if mem.preCheck != nil { if err := mem.preCheck(tx); err != nil { - return pubmempool.ErrPreCheck{ + return types.ErrPreCheck{ Reason: err, } } @@ -241,14 +241,14 @@ func (mem *CListMempool) CheckTx( // Note it's possible a tx is still in the cache but no longer in the mempool // (eg. after committing a block, txs are removed from mempool but not cache), // so we only record the sender for txs still in the mempool. - if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok { + if e, ok := mem.txsMap.Load(tx.Key()); ok { memTx := e.(*clist.CElement).Value.(*mempoolTx) _, loaded := memTx.senders.LoadOrStore(txInfo.SenderID, true) // TODO: consider punishing peer for dups, // its non-trivial since invalid txs can become valid, // but they can spam the same tx with little cost to them atm. 
if loaded { - return pubmempool.ErrTxInCache + return types.ErrTxInCache } } @@ -328,7 +328,7 @@ func (mem *CListMempool) reqResCb( // - resCbFirstTime (lock not held) if tx is valid func (mem *CListMempool) addTx(memTx *mempoolTx) { e := mem.txs.PushBack(memTx) - mem.txsMap.Store(mempool.TxKey(memTx.tx), e) + mem.txsMap.Store(memTx.tx.Key(), e) atomic.AddInt64(&mem.txsBytes, int64(len(memTx.tx))) mem.metrics.TxSizeBytes.Observe(float64(len(memTx.tx))) } @@ -339,7 +339,7 @@ func (mem *CListMempool) addTx(memTx *mempoolTx) { func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromCache bool) { mem.txs.Remove(elem) elem.DetachPrev() - mem.txsMap.Delete(mempool.TxKey(tx)) + mem.txsMap.Delete(tx.Key()) atomic.AddInt64(&mem.txsBytes, int64(-len(tx))) if removeFromCache { @@ -348,13 +348,16 @@ func (mem *CListMempool) removeTx(tx types.Tx, elem *clist.CElement, removeFromC } // RemoveTxByKey removes a transaction from the mempool by its TxKey index. -func (mem *CListMempool) RemoveTxByKey(txKey [mempool.TxKeySize]byte, removeFromCache bool) { +func (mem *CListMempool) RemoveTxByKey(txKey types.TxKey) error { if e, ok := mem.txsMap.Load(txKey); ok { memTx := e.(*clist.CElement).Value.(*mempoolTx) if memTx != nil { - mem.removeTx(memTx.tx, e.(*clist.CElement), removeFromCache) + mem.removeTx(memTx.tx, e.(*clist.CElement), false) + return nil } + return errors.New("transaction not found") } + return errors.New("invalid transaction found") } func (mem *CListMempool) isFull(txSize int) error { @@ -364,7 +367,7 @@ func (mem *CListMempool) isFull(txSize int) error { ) if memSize >= mem.config.Size || int64(txSize)+txsBytes > mem.config.MaxTxsBytes { - return pubmempool.ErrMempoolIsFull{ + return types.ErrMempoolIsFull{ NumTxs: memSize, MaxTxs: mem.config.Size, TxsBytes: txsBytes, @@ -410,7 +413,7 @@ func (mem *CListMempool) resCbFirstTime( mem.addTx(memTx) mem.logger.Debug( "added good transaction", - "tx", mempool.TxHashFromBytes(tx), + "tx", types.Tx(tx).Hash(), "res", r, "height", memTx.height, "total", mem.Size(), @@ -420,7 +423,7 @@ func (mem *CListMempool) resCbFirstTime( // ignore bad transaction mem.logger.Debug( "rejected bad transaction", - "tx", mempool.TxHashFromBytes(tx), + "tx", types.Tx(tx).Hash(), "peerID", peerP2PID, "res", r, "err", postCheckErr, @@ -461,7 +464,7 @@ func (mem *CListMempool) resCbRecheck(req *abci.Request, res *abci.Response) { // Good, nothing to do. } else { // Tx became invalidated due to newly committed block. - mem.logger.Debug("tx is no longer valid", "tx", mempool.TxHashFromBytes(tx), "res", r, "err", postCheckErr) + mem.logger.Debug("tx is no longer valid", "tx", types.Tx(tx).Hash(), "res", r, "err", postCheckErr) // NOTE: we remove tx from the cache because it might be good later mem.removeTx(tx, mem.recheckCursor, !mem.config.KeepInvalidTxsInCache) } @@ -599,7 +602,7 @@ func (mem *CListMempool) Update( // Mempool after: // 100 // https://github.com/tendermint/tendermint/issues/3322. 
- if e, ok := mem.txsMap.Load(mempool.TxKey(tx)); ok { + if e, ok := mem.txsMap.Load(tx.Key()); ok { mem.removeTx(tx, e.(*clist.CElement), false) } } diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go index 65a1b123e..b61a8333e 100644 --- a/internal/mempool/v0/clist_mempool_test.go +++ b/internal/mempool/v0/clist_mempool_test.go @@ -15,16 +15,15 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" - pubmempool "github.com/tendermint/tendermint/pkg/mempool" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -32,22 +31,22 @@ import ( // test. type cleanupFunc func() -func newMempoolWithApp(cc proxy.ClientCreator) (*CListMempool, cleanupFunc) { - return newMempoolWithAppAndConfig(cc, cfg.ResetTestRoot("mempool_test")) +func newMempoolWithApp(cc abciclient.Creator) (*CListMempool, cleanupFunc) { + return newMempoolWithAppAndConfig(cc, config.ResetTestRoot("mempool_test")) } -func newMempoolWithAppAndConfig(cc proxy.ClientCreator, config *cfg.Config) (*CListMempool, cleanupFunc) { - appConnMem, _ := cc.NewABCIClient() +func newMempoolWithAppAndConfig(cc abciclient.Creator, cfg *config.Config) (*CListMempool, cleanupFunc) { + appConnMem, _ := cc() appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool")) err := appConnMem.Start() if err != nil { panic(err) } - mp := NewCListMempool(config.Mempool, appConnMem, 0) + mp := NewCListMempool(cfg.Mempool, appConnMem, 0) mp.SetLogger(log.TestingLogger()) - return mp, func() { os.RemoveAll(config.RootDir) } + return mp, func() { os.RemoveAll(cfg.RootDir) } } func ensureNoFire(t *testing.T, ch <-chan struct{}, timeoutMS int) { @@ -82,7 +81,7 @@ func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types. // Skip invalid txs. // TestMempoolFilters will fail otherwise. It asserts a number of txs // returned. - if pubmempool.IsPreCheckError(err) { + if types.IsPreCheckError(err) { continue } t.Fatalf("CheckTx failed: %v while checking #%d tx", err, i) @@ -93,7 +92,7 @@ func checkTxs(t *testing.T, mp mempool.Mempool, count int, peerID uint16) types. 
func TestReapMaxBytesMaxGas(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -142,7 +141,7 @@ func TestReapMaxBytesMaxGas(t *testing.T) { func TestMempoolFilters(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() emptyTxArr := []types.Tx{[]byte{}} @@ -181,7 +180,7 @@ func TestMempoolFilters(t *testing.T) { func TestMempoolUpdate(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -217,8 +216,8 @@ func TestMempoolUpdate(t *testing.T) { func TestMempool_KeepInvalidTxsInCache(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - wcfg := cfg.DefaultConfig() + cc := abciclient.NewLocalCreator(app) + wcfg := config.DefaultConfig() wcfg.Mempool.KeepInvalidTxsInCache = true mp, cleanup := newMempoolWithAppAndConfig(cc, wcfg) defer cleanup() @@ -265,7 +264,7 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) { func TestTxsAvailable(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() mp.EnableTxsAvailable() @@ -309,12 +308,12 @@ func TestTxsAvailable(t *testing.T) { func TestSerialReap(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mp, cleanup := newMempoolWithApp(cc) defer cleanup() - appConnCon, _ := cc.NewABCIClient() + appConnCon, _ := cc() appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) err := appConnCon.Start() require.Nil(t, err) @@ -420,7 +419,7 @@ func TestSerialReap(t *testing.T) { func TestMempool_CheckTxChecksTxSize(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) mempl, cleanup := newMempoolWithApp(cc) defer cleanup() @@ -455,7 +454,7 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) { if !testCase.err { require.NoError(t, err, caseString) } else { - require.Equal(t, err, pubmempool.ErrTxTooLarge{ + require.Equal(t, err, types.ErrTxTooLarge{ Max: maxTxSize, Actual: testCase.len, }, caseString) @@ -465,10 +464,10 @@ func TestMempool_CheckTxChecksTxSize(t *testing.T) { func TestMempoolTxsBytes(t *testing.T) { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - config := cfg.ResetTestRoot("mempool_test") - config.Mempool.MaxTxsBytes = 10 - mp, cleanup := newMempoolWithAppAndConfig(cc, config) + cc := abciclient.NewLocalCreator(app) + cfg := config.ResetTestRoot("mempool_test") + cfg.Mempool.MaxTxsBytes = 10 + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) defer cleanup() // 1. zero by default @@ -503,12 +502,12 @@ func TestMempoolTxsBytes(t *testing.T) { err = mp.CheckTx(context.Background(), []byte{0x05}, nil, mempool.TxInfo{}) if assert.Error(t, err) { - assert.IsType(t, pubmempool.ErrMempoolIsFull{}, err) + assert.IsType(t, types.ErrMempoolIsFull{}, err) } // 6. 
zero after tx is rechecked and removed due to not being valid anymore app2 := kvstore.NewApplication() - cc = proxy.NewLocalClientCreator(app2) + cc = abciclient.NewLocalCreator(app2) mp, cleanup = newMempoolWithApp(cc) defer cleanup() @@ -519,7 +518,7 @@ func TestMempoolTxsBytes(t *testing.T) { require.NoError(t, err) assert.EqualValues(t, 8, mp.SizeBytes()) - appConnCon, _ := cc.NewABCIClient() + appConnCon, _ := cc() appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus")) err = appConnCon.Start() require.Nil(t, err) @@ -545,9 +544,9 @@ func TestMempoolTxsBytes(t *testing.T) { err = mp.CheckTx(context.Background(), []byte{0x06}, nil, mempool.TxInfo{}) require.NoError(t, err) assert.EqualValues(t, 9, mp.SizeBytes()) - mp.RemoveTxByKey(mempool.TxKey([]byte{0x07}), true) + assert.Error(t, mp.RemoveTxByKey(types.Tx([]byte{0x07}).Key())) assert.EqualValues(t, 9, mp.SizeBytes()) - mp.RemoveTxByKey(mempool.TxKey([]byte{0x06}), true) + assert.NoError(t, mp.RemoveTxByKey(types.Tx([]byte{0x06}).Key())) assert.EqualValues(t, 8, mp.SizeBytes()) } @@ -565,8 +564,8 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { t.Error(err) } }) - config := cfg.ResetTestRoot("mempool_test") - mp, cleanup := newMempoolWithAppAndConfig(cc, config) + cfg := config.ResetTestRoot("mempool_test") + mp, cleanup := newMempoolWithAppAndConfig(cc, cfg) defer cleanup() // generate small number of txs @@ -578,7 +577,7 @@ func TestMempoolRemoteAppConcurrency(t *testing.T) { } // simulate a group of peers sending them over and over - N := config.Mempool.Size + N := cfg.Mempool.Size maxPeers := 5 for i := 0; i < N; i++ { peerID := mrand.Intn(maxPeers) @@ -598,10 +597,10 @@ func newRemoteApp( addr string, app abci.Application, ) ( - clientCreator proxy.ClientCreator, + clientCreator abciclient.Creator, server service.Service, ) { - clientCreator = proxy.NewRemoteClientCreator(addr, "socket", true) + clientCreator = abciclient.NewRemoteCreator(addr, "socket", true) // Start server server = abciserver.NewSocketServer(addr, app) diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index 29dec5833..118321645 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -8,7 +8,7 @@ import ( "sync" "time" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" @@ -37,9 +37,9 @@ type PeerManager interface { type Reactor struct { service.BaseService - config *cfg.MempoolConfig + cfg *config.MempoolConfig mempool *CListMempool - ids *mempool.MempoolIDs + ids *mempool.IDs // XXX: Currently, this is the only way to get information about a peer. Ideally, // we rely on message-oriented communication to get necessary peer data. @@ -61,7 +61,7 @@ type Reactor struct { // NewReactor returns a reference to a new reactor. 
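As the updated assertions above show, `RemoveTxByKey` now reports whether the transaction was actually present instead of silently doing nothing. A small wrapper sketch of the new calling convention (hypothetical helper; these are internal packages, so this only compiles inside the tendermint module):

```go
package sketch

import (
	"github.com/tendermint/tendermint/internal/mempool"
	"github.com/tendermint/tendermint/types"
)

// removeIfPresent wraps the error-returning RemoveTxByKey: nil means the
// transaction was found and removed, an error means it was not in the pool.
func removeIfPresent(mp mempool.Mempool, tx types.Tx) bool {
	return mp.RemoveTxByKey(tx.Key()) == nil
}
```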
func NewReactor( logger log.Logger, - config *cfg.MempoolConfig, + cfg *config.MempoolConfig, peerMgr PeerManager, mp *CListMempool, mempoolCh *p2p.Channel, @@ -69,7 +69,7 @@ func NewReactor( ) *Reactor { r := &Reactor{ - config: config, + cfg: cfg, peerMgr: peerMgr, mempool: mp, ids: mempool.NewMempoolIDs(), @@ -83,32 +83,22 @@ func NewReactor( return r } -// GetChannelShims returns a map of ChannelDescriptorShim objects, where each -// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding -// p2p proto.Message the new p2p Channel is responsible for handling. -// -// -// TODO: Remove once p2p refactor is complete. -// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { - largestTx := make([]byte, config.MaxTxBytes) +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { + largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, }, } - return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - mempool.MempoolChannel: { - MsgType: new(protomem.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(mempool.MempoolChannel), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - MaxSendBytes: 5000, - }, - }, + return &p2p.ChannelDescriptor{ + ID: mempool.MempoolChannel, + MessageType: new(protomem.Message), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + RecvBufferCapacity: 128, } } @@ -117,7 +107,7 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. 
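GetChannelDescriptor replaces the legacy shim map with a single descriptor whose `RecvMessageCapacity` is sized from a protobuf `Txs` message carrying one maximum-size transaction, so the receive limit tracks `MaxTxBytes` plus proto framing overhead. A sketch of the intended call site, mirroring how the test suite later in this diff overrides the receive buffer (the `mempoolChannel` helper name is illustrative):

```go
package sketch

import (
	"github.com/tendermint/tendermint/config"
	v0 "github.com/tendermint/tendermint/internal/mempool/v0"
	"github.com/tendermint/tendermint/internal/p2p"
)

// mempoolChannel asks the reactor package for its descriptor and lets the
// caller tune the inbound buffer, rather than baking the value into a shim.
func mempoolChannel(cfg *config.Config, buf int) *p2p.ChannelDescriptor {
	chDesc := v0.GetChannelDescriptor(cfg.Mempool)
	chDesc.RecvBufferCapacity = buf
	return chDesc
}
```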
func (r *Reactor) OnStart() error { - if !r.config.Broadcast { + if !r.cfg.Broadcast { r.Logger.Info("tx broadcasting is disabled") } @@ -171,7 +161,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { for _, tx := range protoTxs { if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err) + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) } } @@ -254,7 +244,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { return } - if r.config.Broadcast { + if r.cfg.Broadcast { // Check if we've already started a goroutine for this peer, if not we create // a new done channel so we can explicitly close the goroutine if the peer // is later removed, we increment the waitgroup so the reactor can stop @@ -378,7 +368,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) } r.Logger.Debug( "gossiped tx to peer", - "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)), + "tx", fmt.Sprintf("%X", memTx.tx.Hash()), "peer", peerID, ) } diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go index 91729b37c..69582284b 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/v0/reactor_test.go @@ -8,16 +8,16 @@ import ( "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -36,7 +36,7 @@ type reactorTestSuite struct { nodes []types.NodeID } -func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { +func setup(t *testing.T, config *config.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite { t.Helper() rts := &reactorTestSuite{ @@ -50,12 +50,13 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), } - chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} - rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc, new(protomem.Message), int(chBuf)) + chDesc := GetChannelDescriptor(config) + chDesc.RecvBufferCapacity = int(chBuf) + rts.mempoolChnnels = rts.network.MakeChannelsNoCleanup(t, chDesc) for nodeID := range rts.network.Nodes { rts.kvstores[nodeID] = kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(rts.kvstores[nodeID]) + cc := abciclient.NewLocalCreator(rts.kvstores[nodeID]) mempool, memCleanup := newMempoolWithApp(cc) t.Cleanup(memCleanup) @@ -68,7 +69,7 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac rts.reactors[nodeID] = NewReactor( rts.logger.With("nodeID", nodeID), - cfg, + config, rts.network.Nodes[nodeID].PeerManager, mempool, rts.mempoolChnnels[nodeID], @@ -158,9 +159,9 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...typ 
func TestReactorBroadcastTxs(t *testing.T) { numTxs := 1000 numNodes := 10 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, 0) + rts := setup(t, cfg.Mempool, numNodes, 0) primary := rts.nodes[0] secondaries := rts.nodes[1:] @@ -185,9 +186,9 @@ func TestReactorBroadcastTxs(t *testing.T) { func TestReactorConcurrency(t *testing.T) { numTxs := 5 numNodes := 2 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, 0) + rts := setup(t, cfg.Mempool, numNodes, 0) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -244,9 +245,9 @@ func TestReactorConcurrency(t *testing.T) { func TestReactorNoBroadcastToSender(t *testing.T) { numTxs := 1000 numNodes := 2 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, uint(numTxs)) + rts := setup(t, cfg.Mempool, numNodes, uint(numTxs)) primary := rts.nodes[0] secondary := rts.nodes[1] @@ -267,16 +268,16 @@ func TestReactorNoBroadcastToSender(t *testing.T) { func TestReactor_MaxTxBytes(t *testing.T) { numNodes := 2 - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, numNodes, 0) + rts := setup(t, cfg.Mempool, numNodes, 0) primary := rts.nodes[0] secondary := rts.nodes[1] // Broadcast a tx, which has the max size and ensure it's received by the // second reactor. - tx1 := tmrand.Bytes(config.Mempool.MaxTxBytes) + tx1 := tmrand.Bytes(cfg.Mempool.MaxTxBytes) err := rts.reactors[primary].mempool.CheckTx( context.Background(), tx1, @@ -297,7 +298,7 @@ func TestReactor_MaxTxBytes(t *testing.T) { rts.reactors[secondary].mempool.Flush() // broadcast a tx, which is beyond the max size and ensure it's not sent - tx2 := tmrand.Bytes(config.Mempool.MaxTxBytes + 1) + tx2 := tmrand.Bytes(cfg.Mempool.MaxTxBytes + 1) err = rts.mempools[primary].CheckTx(context.Background(), tx2, nil, mempool.TxInfo{SenderID: mempool.UnknownPeerID}) require.Error(t, err) @@ -305,11 +306,11 @@ func TestReactor_MaxTxBytes(t *testing.T) { } func TestDontExhaustMaxActiveIDs(t *testing.T) { - config := cfg.TestConfig() + cfg := config.TestConfig() // we're creating a single node network, but not starting the // network. 
- rts := setup(t, config.Mempool, 1, mempool.MaxActiveIDs+1) + rts := setup(t, cfg.Mempool, 1, mempool.MaxActiveIDs+1) nodeID := rts.nodes[0] @@ -374,9 +375,9 @@ func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) { t.Skip("skipping test in short mode") } - config := cfg.TestConfig() + cfg := config.TestConfig() - rts := setup(t, config.Mempool, 2, 0) + rts := setup(t, cfg.Mempool, 2, 0) primary := rts.nodes[0] secondary := rts.nodes[1] diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go index 850600697..a12fbc51b 100644 --- a/internal/mempool/v1/mempool.go +++ b/internal/mempool/v1/mempool.go @@ -3,6 +3,7 @@ package v1 import ( "bytes" "context" + "errors" "fmt" "sync/atomic" "time" @@ -12,10 +13,9 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" - pubmempool "github.com/tendermint/tendermint/pkg/mempool" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -239,7 +239,7 @@ func (txmp *TxMempool) CheckTx( txSize := len(tx) if txSize > txmp.config.MaxTxBytes { - return pubmempool.ErrTxTooLarge{ + return types.ErrTxTooLarge{ Max: txmp.config.MaxTxBytes, Actual: txSize, } @@ -247,7 +247,7 @@ func (txmp *TxMempool) CheckTx( if txmp.preCheck != nil { if err := txmp.preCheck(tx); err != nil { - return pubmempool.ErrPreCheck{ + return types.ErrPreCheck{ Reason: err, } } @@ -257,7 +257,7 @@ func (txmp *TxMempool) CheckTx( return err } - txHash := mempool.TxKey(tx) + txHash := tx.Key() // We add the transaction to the mempool's cache and if the transaction already // exists, i.e. false is returned, then we check if we've seen this transaction @@ -267,7 +267,7 @@ func (txmp *TxMempool) CheckTx( if wtx != nil && ok { // We already have the transaction stored and the we've already seen this // transaction from txInfo.SenderID. - return pubmempool.ErrTxInCache + return types.ErrTxInCache } txmp.logger.Debug("tx exists already in cache", "tx_hash", tx.Hash()) @@ -305,6 +305,19 @@ func (txmp *TxMempool) CheckTx( return nil } +func (txmp *TxMempool) RemoveTxByKey(txKey types.TxKey) error { + txmp.Lock() + defer txmp.Unlock() + + // remove the committed transaction from the transaction store and indexes + if wtx := txmp.txStore.GetTxByHash(txKey); wtx != nil { + txmp.removeTx(wtx, false) + return nil + } + + return errors.New("transaction not found") +} + // Flush flushes out the mempool. It acquires a read-lock, fetches all the // transactions currently in the transaction store and removes each transaction // from the store and all indexes and finally resets the cache. 
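With the sentinel errors relocated from pkg/mempool into types, callers can classify `CheckTx` failures without importing the now-internal mempool package. A sketch under that assumption (the error names and `IsPreCheckError` come straight from the hunks in this diff; the classification buckets are illustrative):

```go
package sketch

import (
	"errors"

	"github.com/tendermint/tendermint/types"
)

// checkTxOutcome buckets the errors CheckTx can return after this change.
func checkTxOutcome(err error) string {
	switch {
	case err == nil:
		return "accepted"
	case errors.Is(err, types.ErrTxInCache):
		return "duplicate" // tx already seen; not an application rejection
	case types.IsPreCheckError(err):
		return "rejected by precheck"
	default:
		var tooLarge types.ErrTxTooLarge
		var full types.ErrMempoolIsFull
		if errors.As(err, &tooLarge) || errors.As(err, &full) {
			return "resource limit" // retryable once the pool drains
		}
		return "invalid"
	}
}
```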
@@ -452,7 +465,7 @@ func (txmp *TxMempool) Update( } // remove the committed transaction from the transaction store and indexes - if wtx := txmp.txStore.GetTxByHash(mempool.TxKey(tx)); wtx != nil { + if wtx := txmp.txStore.GetTxByHash(tx.Key()); wtx != nil { txmp.removeTx(wtx, false) } } @@ -630,7 +643,7 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) tx := req.GetCheckTx().Tx wtx := txmp.recheckCursor.Value.(*WrappedTx) if !bytes.Equal(tx, wtx.tx) { - panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), mempool.TxKey(tx))) + panic(fmt.Sprintf("re-CheckTx transaction mismatch; got: %X, expected: %X", wtx.tx.Hash(), types.Tx(tx).Key())) } // Only evaluate transactions that have not been removed. This can happen @@ -648,7 +661,7 @@ func (txmp *TxMempool) defaultTxCallback(req *abci.Request, res *abci.Response) txmp.logger.Debug( "existing transaction no longer valid; failed re-CheckTx callback", "priority", wtx.priority, - "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(wtx.tx)), + "tx", fmt.Sprintf("%X", wtx.tx.Hash()), "err", err, "code", checkTxRes.CheckTx.Code, ) @@ -728,7 +741,7 @@ func (txmp *TxMempool) canAddTx(wtx *WrappedTx) error { ) if numTxs >= txmp.config.Size || int64(wtx.Size())+sizeBytes > txmp.config.MaxTxsBytes { - return pubmempool.ErrMempoolIsFull{ + return types.ErrMempoolIsFull{ NumTxs: numTxs, MaxTxs: txmp.config.Size, TxsBytes: sizeBytes, @@ -785,13 +798,13 @@ func (txmp *TxMempool) removeTx(wtx *WrappedTx, removeFromCache bool) { // the height and time based indexes. func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { now := time.Now() - expiredTxs := make(map[[mempool.TxKeySize]byte]*WrappedTx) + expiredTxs := make(map[types.TxKey]*WrappedTx) if txmp.config.TTLNumBlocks > 0 { purgeIdx := -1 for i, wtx := range txmp.heightIndex.txs { if (blockHeight - wtx.height) > txmp.config.TTLNumBlocks { - expiredTxs[mempool.TxKey(wtx.tx)] = wtx + expiredTxs[wtx.tx.Key()] = wtx purgeIdx = i } else { // since the index is sorted, we know no other txs can be be purged @@ -808,7 +821,7 @@ func (txmp *TxMempool) purgeExpiredTxs(blockHeight int64) { purgeIdx := -1 for i, wtx := range txmp.timestampIndex.txs { if now.Sub(wtx.timestamp) > txmp.config.TTLDuration { - expiredTxs[mempool.TxKey(wtx.tx)] = wtx + expiredTxs[wtx.tx.Key()] = wtx purgeIdx = i } else { // since the index is sorted, we know no other txs can be be purged diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/v1/mempool_test.go index df26be726..8bed5520a 100644 --- a/internal/mempool/v1/mempool_test.go +++ b/internal/mempool/v1/mempool_test.go @@ -15,13 +15,14 @@ import ( "time" "github.com/stretchr/testify/require" + + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -76,12 +77,12 @@ func setup(t testing.TB, cacheSize int, options ...TxMempoolOption) *TxMempool { t.Helper() app := &application{kvstore.NewApplication()} - cc := proxy.NewLocalClientCreator(app) + cc := abciclient.NewLocalCreator(app) cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) cfg.Mempool.CacheSize = cacheSize - appConnMem, err := 
cc.NewABCIClient() + appConnMem, err := cc() require.NoError(t, err) require.NoError(t, appConnMem.Start()) @@ -225,10 +226,10 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) - txMap := make(map[[mempool.TxKeySize]byte]testTx) + txMap := make(map[types.TxKey]testTx) priorities := make([]int64, len(tTxs)) for i, tTx := range tTxs { - txMap[mempool.TxKey(tTx.tx)] = tTx + txMap[tTx.tx.Key()] = tTx priorities[i] = tTx.priority } @@ -240,7 +241,7 @@ func TestTxMempool_ReapMaxBytesMaxGas(t *testing.T) { ensurePrioritized := func(reapedTxs types.Txs) { reapedPriorities := make([]int64, len(reapedTxs)) for i, rTx := range reapedTxs { - reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority + reapedPriorities[i] = txMap[rTx.Key()].priority } require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities) @@ -275,10 +276,10 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { require.Equal(t, len(tTxs), txmp.Size()) require.Equal(t, int64(5690), txmp.SizeBytes()) - txMap := make(map[[mempool.TxKeySize]byte]testTx) + txMap := make(map[types.TxKey]testTx) priorities := make([]int64, len(tTxs)) for i, tTx := range tTxs { - txMap[mempool.TxKey(tTx.tx)] = tTx + txMap[tTx.tx.Key()] = tTx priorities[i] = tTx.priority } @@ -290,7 +291,7 @@ func TestTxMempool_ReapMaxTxs(t *testing.T) { ensurePrioritized := func(reapedTxs types.Txs) { reapedPriorities := make([]int64, len(reapedTxs)) for i, rTx := range reapedTxs { - reapedPriorities[i] = txMap[mempool.TxKey(rTx)].priority + reapedPriorities[i] = txMap[rTx.Key()].priority } require.Equal(t, priorities[:len(reapedPriorities)], reapedPriorities) diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index 9deb7aace..8ef5a6bd8 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -8,7 +8,7 @@ import ( "sync" "time" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" @@ -37,9 +37,9 @@ type PeerManager interface { type Reactor struct { service.BaseService - config *cfg.MempoolConfig + cfg *config.MempoolConfig mempool *TxMempool - ids *mempool.MempoolIDs + ids *mempool.IDs // XXX: Currently, this is the only way to get information about a peer. Ideally, // we rely on message-oriented communication to get necessary peer data. @@ -54,6 +54,10 @@ type Reactor struct { // goroutines. peerWG sync.WaitGroup + // observePanic is a function for observing panics that were recovered in methods on + // Reactor. observePanic is called with the recovered value. + observePanic func(interface{}) + mtx tmsync.Mutex peerRoutines map[types.NodeID]*tmsync.Closer } @@ -61,7 +65,7 @@ type Reactor struct { // NewReactor returns a reference to a new reactor. 
func NewReactor( logger log.Logger, - config *cfg.MempoolConfig, + cfg *config.MempoolConfig, peerMgr PeerManager, txmp *TxMempool, mempoolCh *p2p.Channel, @@ -69,7 +73,7 @@ func NewReactor( ) *Reactor { r := &Reactor{ - config: config, + cfg: cfg, peerMgr: peerMgr, mempool: txmp, ids: mempool.NewMempoolIDs(), @@ -77,38 +81,31 @@ func NewReactor( peerUpdates: peerUpdates, closeCh: make(chan struct{}), peerRoutines: make(map[types.NodeID]*tmsync.Closer), + observePanic: defaultObservePanic, } r.BaseService = *service.NewBaseService(logger, "Mempool", r) return r } -// GetChannelShims returns a map of ChannelDescriptorShim objects, where each -// object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding -// p2p proto.Message the new p2p Channel is responsible for handling. -// -// -// TODO: Remove once p2p refactor is complete. -// ref: https://github.com/tendermint/tendermint/issues/5670 -func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDescriptorShim { - largestTx := make([]byte, config.MaxTxBytes) +func defaultObservePanic(r interface{}) {} + +// GetChannelDescriptor produces an instance of a descriptor for this +// package's required channels. +func GetChannelDescriptor(cfg *config.MempoolConfig) *p2p.ChannelDescriptor { + largestTx := make([]byte, cfg.MaxTxBytes) batchMsg := protomem.Message{ Sum: &protomem.Message_Txs{ Txs: &protomem.Txs{Txs: [][]byte{largestTx}}, }, } - return map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - mempool.MempoolChannel: { - MsgType: new(protomem.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(mempool.MempoolChannel), - Priority: 5, - RecvMessageCapacity: batchMsg.Size(), - RecvBufferCapacity: 128, - MaxSendBytes: 5000, - }, - }, + return &p2p.ChannelDescriptor{ + ID: mempool.MempoolChannel, + MessageType: new(protomem.Message), + Priority: 5, + RecvMessageCapacity: batchMsg.Size(), + RecvBufferCapacity: 128, } } @@ -117,7 +114,7 @@ func GetChannelShims(config *cfg.MempoolConfig) map[p2p.ChannelID]*p2p.ChannelDe // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. 
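The new `observePanic` hook defaults to a no-op in production wiring but lets tests detect any recovered panic, as the reactor test added below does. A minimal sketch of the recover-then-observe pattern (the `handler` type is hypothetical; the error message mirrors `handleMessage` above):

```go
package sketch

import "fmt"

// handler mirrors the reactor's pattern: recover, report to the observer,
// then convert the panic into an ordinary error for the caller.
type handler struct {
	observePanic func(interface{})
	process      func()
}

func (h *handler) handle() (err error) {
	defer func() {
		if e := recover(); e != nil {
			h.observePanic(e)
			err = fmt.Errorf("panic in processing message: %v", e)
		}
	}()
	h.process()
	return nil
}
```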
func (r *Reactor) OnStart() error { - if !r.config.Broadcast { + if !r.cfg.Broadcast { r.Logger.Info("tx broadcasting is disabled") } @@ -171,7 +168,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { for _, tx := range protoTxs { if err := r.mempool.CheckTx(context.Background(), types.Tx(tx), nil, txInfo); err != nil { - logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(tx)), "err", err) + logger.Error("checktx failed for tx", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "err", err) } } @@ -188,6 +185,7 @@ func (r *Reactor) handleMempoolMessage(envelope p2p.Envelope) error { func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) { defer func() { if e := recover(); e != nil { + r.observePanic(e) err = fmt.Errorf("panic in processing message: %v", e) r.Logger.Error( "recovering from processing message panic", @@ -254,7 +252,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { return } - if r.config.Broadcast { + if r.cfg.Broadcast { // Check if we've already started a goroutine for this peer, if not we create // a new done channel so we can explicitly close the goroutine if the peer // is later removed, we increment the waitgroup so the reactor can stop @@ -318,6 +316,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) r.peerWG.Done() if e := recover(); e != nil { + r.observePanic(e) r.Logger.Error( "recovering from broadcasting mempool loop", "err", e, @@ -377,7 +376,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) } r.Logger.Debug( "gossiped tx to peer", - "tx", fmt.Sprintf("%X", mempool.TxHashFromBytes(memTx.tx)), + "tx", fmt.Sprintf("%X", memTx.tx.Hash()), "peer", peerID, ) } diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go new file mode 100644 index 000000000..56e6057a1 --- /dev/null +++ b/internal/mempool/v1/reactor_test.go @@ -0,0 +1,145 @@ +package v1 + +import ( + "os" + "strings" + "sync" + "testing" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/config" + tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/p2p/p2ptest" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/types" +) + +type reactorTestSuite struct { + network *p2ptest.Network + logger log.Logger + + reactors map[types.NodeID]*Reactor + mempoolChannels map[types.NodeID]*p2p.Channel + mempools map[types.NodeID]*TxMempool + kvstores map[types.NodeID]*kvstore.Application + + peerChans map[types.NodeID]chan p2p.PeerUpdate + peerUpdates map[types.NodeID]*p2p.PeerUpdates + + nodes []types.NodeID +} + +func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { + t.Helper() + + cfg := config.ResetTestRoot(strings.ReplaceAll(t.Name(), "/", "|")) + t.Cleanup(func() { + os.RemoveAll(cfg.RootDir) + }) + + rts := &reactorTestSuite{ + logger: log.TestingLogger().With("testCase", t.Name()), + network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), + reactors: make(map[types.NodeID]*Reactor, numNodes), + mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), + mempools: make(map[types.NodeID]*TxMempool, numNodes), + kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), + peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), + 
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + } + + chDesc := GetChannelDescriptor(cfg.Mempool) + rts.mempoolChannels = rts.network.MakeChannelsNoCleanup(t, chDesc) + + for nodeID := range rts.network.Nodes { + rts.kvstores[nodeID] = kvstore.NewApplication() + + mempool := setup(t, 0) + rts.mempools[nodeID] = mempool + + rts.peerChans[nodeID] = make(chan p2p.PeerUpdate) + rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1) + rts.network.Nodes[nodeID].PeerManager.Register(rts.peerUpdates[nodeID]) + + rts.reactors[nodeID] = NewReactor( + rts.logger.With("nodeID", nodeID), + cfg.Mempool, + rts.network.Nodes[nodeID].PeerManager, + mempool, + rts.mempoolChannels[nodeID], + rts.peerUpdates[nodeID], + ) + + rts.nodes = append(rts.nodes, nodeID) + + require.NoError(t, rts.reactors[nodeID].Start()) + require.True(t, rts.reactors[nodeID].IsRunning()) + } + + require.Len(t, rts.reactors, numNodes) + + t.Cleanup(func() { + for nodeID := range rts.reactors { + if rts.reactors[nodeID].IsRunning() { + require.NoError(t, rts.reactors[nodeID].Stop()) + require.False(t, rts.reactors[nodeID].IsRunning()) + } + } + }) + + return rts +} + +func (rts *reactorTestSuite) start(t *testing.T) { + t.Helper() + rts.network.Start(t) + require.Len(t, + rts.network.RandomNode().PeerManager.Peers(), + len(rts.nodes)-1, + "network does not have expected number of nodes") +} + +func TestReactorBroadcastDoesNotPanic(t *testing.T) { + numNodes := 2 + rts := setupReactors(t, numNodes, 0) + + observePanic := func(r interface{}) { + t.Fatal("panic detected in reactor") + } + + primary := rts.nodes[0] + secondary := rts.nodes[1] + primaryReactor := rts.reactors[primary] + primaryMempool := primaryReactor.mempool + secondaryReactor := rts.reactors[secondary] + + primaryReactor.observePanic = observePanic + secondaryReactor.observePanic = observePanic + + firstTx := &WrappedTx{} + primaryMempool.insertTx(firstTx) + + // run the router + rts.start(t) + + closer := tmsync.NewCloser() + primaryReactor.peerWG.Add(1) + go primaryReactor.broadcastTxRoutine(secondary, closer) + + wg := &sync.WaitGroup{} + for i := 0; i < 50; i++ { + next := &WrappedTx{} + wg.Add(1) + go func() { + defer wg.Done() + primaryMempool.insertTx(next) + }() + } + + err := primaryReactor.Stop() + require.NoError(t, err) + primaryReactor.peerWG.Wait() + wg.Wait() +} diff --git a/internal/mempool/v1/tx.go b/internal/mempool/v1/tx.go index 15173b91f..c5b7ca82f 100644 --- a/internal/mempool/v1/tx.go +++ b/internal/mempool/v1/tx.go @@ -6,7 +6,6 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) @@ -17,7 +16,7 @@ type WrappedTx struct { tx types.Tx // hash defines the transaction hash and the primary key used in the mempool - hash [mempool.TxKeySize]byte + hash types.TxKey // height defines the height at which the transaction was validated at height int64 @@ -66,14 +65,14 @@ func (wtx *WrappedTx) Size() int { // need mutative access. 
type TxStore struct { mtx tmsync.RWMutex - hashTxs map[[mempool.TxKeySize]byte]*WrappedTx // primary index - senderTxs map[string]*WrappedTx // sender is defined by the ABCI application + hashTxs map[types.TxKey]*WrappedTx // primary index + senderTxs map[string]*WrappedTx // sender is defined by the ABCI application } func NewTxStore() *TxStore { return &TxStore{ senderTxs: make(map[string]*WrappedTx), - hashTxs: make(map[[mempool.TxKeySize]byte]*WrappedTx), + hashTxs: make(map[types.TxKey]*WrappedTx), } } @@ -110,7 +109,7 @@ func (txs *TxStore) GetTxBySender(sender string) *WrappedTx { } // GetTxByHash returns a *WrappedTx by the transaction's hash. -func (txs *TxStore) GetTxByHash(hash [mempool.TxKeySize]byte) *WrappedTx { +func (txs *TxStore) GetTxByHash(hash types.TxKey) *WrappedTx { txs.mtx.RLock() defer txs.mtx.RUnlock() @@ -119,7 +118,7 @@ func (txs *TxStore) GetTxByHash(hash [mempool.TxKeySize]byte) *WrappedTx { // IsTxRemoved returns true if a transaction by hash is marked as removed and // false otherwise. -func (txs *TxStore) IsTxRemoved(hash [mempool.TxKeySize]byte) bool { +func (txs *TxStore) IsTxRemoved(hash types.TxKey) bool { txs.mtx.RLock() defer txs.mtx.RUnlock() @@ -142,7 +141,7 @@ func (txs *TxStore) SetTx(wtx *WrappedTx) { txs.senderTxs[wtx.sender] = wtx } - txs.hashTxs[mempool.TxKey(wtx.tx)] = wtx + txs.hashTxs[wtx.tx.Key()] = wtx } // RemoveTx removes a *WrappedTx from the transaction store. It deletes all @@ -155,13 +154,13 @@ func (txs *TxStore) RemoveTx(wtx *WrappedTx) { delete(txs.senderTxs, wtx.sender) } - delete(txs.hashTxs, mempool.TxKey(wtx.tx)) + delete(txs.hashTxs, wtx.tx.Key()) wtx.removed = true } // TxHasPeer returns true if a transaction by hash has a given peer ID and false // otherwise. If the transaction does not exist, false is returned. -func (txs *TxStore) TxHasPeer(hash [mempool.TxKeySize]byte, peerID uint16) bool { +func (txs *TxStore) TxHasPeer(hash types.TxKey, peerID uint16) bool { txs.mtx.RLock() defer txs.mtx.RUnlock() @@ -179,7 +178,7 @@ func (txs *TxStore) TxHasPeer(hash [mempool.TxKeySize]byte, peerID uint16) bool // We return true if we've already recorded the given peer for this transaction // and false otherwise. If the transaction does not exist by hash, we return // (nil, false). 
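TxStore keeps two indexes over the same `WrappedTx` values: by `types.TxKey` and by ABCI-reported sender. A round-trip sketch of the API after the key-type change, written as if inside the v1 package (fields other than `tx` and `sender` left zero, following the updated tx_test.go below):

```go
package v1

import "github.com/tendermint/tendermint/types"

// txStoreRoundTrip stores one wrapped tx and reads it back through both
// indexes; all hash lookups now key on types.TxKey derived from the bytes.
func txStoreRoundTrip() bool {
	txs := NewTxStore()
	wtx := &WrappedTx{tx: types.Tx("test_tx"), sender: "foo"}
	txs.SetTx(wtx)

	return txs.GetTxByHash(wtx.tx.Key()) == wtx &&
		txs.GetTxBySender("foo") == wtx
}
```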
-func (txs *TxStore) GetOrSetPeerByTxHash(hash [mempool.TxKeySize]byte, peerID uint16) (*WrappedTx, bool) { +func (txs *TxStore) GetOrSetPeerByTxHash(hash types.TxKey, peerID uint16) (*WrappedTx, bool) { txs.mtx.Lock() defer txs.mtx.Unlock() diff --git a/internal/mempool/v1/tx_test.go b/internal/mempool/v1/tx_test.go index c5d488669..fb4beafab 100644 --- a/internal/mempool/v1/tx_test.go +++ b/internal/mempool/v1/tx_test.go @@ -8,7 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/types" ) func TestTxStore_GetTxBySender(t *testing.T) { @@ -39,7 +39,7 @@ func TestTxStore_GetTxByHash(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() res := txs.GetTxByHash(key) require.Nil(t, res) @@ -58,7 +58,7 @@ func TestTxStore_SetTx(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() txs.SetTx(wtx) res := txs.GetTxByHash(key) @@ -81,10 +81,10 @@ func TestTxStore_GetOrSetPeerByTxHash(t *testing.T) { timestamp: time.Now(), } - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() txs.SetTx(wtx) - res, ok := txs.GetOrSetPeerByTxHash(mempool.TxKey([]byte("test_tx_2")), 15) + res, ok := txs.GetOrSetPeerByTxHash(types.Tx([]byte("test_tx_2")).Key(), 15) require.Nil(t, res) require.False(t, ok) @@ -110,7 +110,7 @@ func TestTxStore_RemoveTx(t *testing.T) { txs.SetTx(wtx) - key := mempool.TxKey(wtx.tx) + key := wtx.tx.Key() res := txs.GetTxByHash(key) require.NotNil(t, res) diff --git a/internal/p2p/base_reactor.go b/internal/p2p/base_reactor.go deleted file mode 100644 index 09925caf8..000000000 --- a/internal/p2p/base_reactor.go +++ /dev/null @@ -1,74 +0,0 @@ -package p2p - -import ( - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/service" -) - -// Reactor is responsible for handling incoming messages on one or more -// Channel. Switch calls GetChannels when reactor is added to it. When a new -// peer joins our node, InitPeer and AddPeer are called. RemovePeer is called -// when the peer is stopped. Receive is called when a message is received on a -// channel associated with this reactor. -// -// Peer#Send or Peer#TrySend should be used to send the message to a peer. -type Reactor interface { - service.Service // Start, Stop - - // SetSwitch allows setting a switch. - SetSwitch(*Switch) - - // GetChannels returns the list of MConnection.ChannelDescriptor. Make sure - // that each ID is unique across all the reactors added to the switch. - GetChannels() []*conn.ChannelDescriptor - - // InitPeer is called by the switch before the peer is started. Use it to - // initialize data for the peer (e.g. peer state). - // - // NOTE: The switch won't call AddPeer nor RemovePeer if it fails to start - // the peer. Do not store any data associated with the peer in the reactor - // itself unless you don't want to have a state, which is never cleaned up. - InitPeer(peer Peer) Peer - - // AddPeer is called by the switch after the peer is added and successfully - // started. Use it to start goroutines communicating with the peer. - AddPeer(peer Peer) - - // RemovePeer is called by the switch when the peer is stopped (due to error - // or other reason). - RemovePeer(peer Peer, reason interface{}) - - // Receive is called by the switch when msgBytes is received from the peer. - // - // NOTE reactor can not keep msgBytes around after Receive completes without - // copying. 
- // - // CONTRACT: msgBytes are not nil. - // - // XXX: do not call any methods that can block or incur heavy processing. - // https://github.com/tendermint/tendermint/issues/2888 - Receive(chID byte, peer Peer, msgBytes []byte) -} - -//-------------------------------------- - -type BaseReactor struct { - service.BaseService // Provides Start, Stop, .Quit - Switch *Switch -} - -func NewBaseReactor(name string, impl Reactor) *BaseReactor { - return &BaseReactor{ - BaseService: *service.NewBaseService(nil, name, impl), - Switch: nil, - } -} - -func (br *BaseReactor) SetSwitch(sw *Switch) { - br.Switch = sw -} -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} -func (*BaseReactor) InitPeer(peer Peer) Peer { return peer } diff --git a/internal/p2p/conn/conn_go110.go b/internal/p2p/conn/conn_go110.go index 682188101..459c3169b 100644 --- a/internal/p2p/conn/conn_go110.go +++ b/internal/p2p/conn/conn_go110.go @@ -1,3 +1,4 @@ +//go:build go1.10 // +build go1.10 package conn diff --git a/internal/p2p/conn/conn_notgo110.go b/internal/p2p/conn/conn_notgo110.go index ed642eb54..21dffad2c 100644 --- a/internal/p2p/conn/conn_notgo110.go +++ b/internal/p2p/conn/conn_notgo110.go @@ -1,3 +1,4 @@ +//go:build !go1.10 // +build !go1.10 package conn diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go index dc5bacc39..a99e83dc5 100644 --- a/internal/p2p/conn/connection.go +++ b/internal/p2p/conn/connection.go @@ -48,7 +48,7 @@ const ( defaultPongTimeout = 45 * time.Second ) -type receiveCbFunc func(chID byte, msgBytes []byte) +type receiveCbFunc func(chID ChannelID, msgBytes []byte) type errorCbFunc func(interface{}) /* @@ -64,15 +64,11 @@ initialization of the connection. There are two methods for sending messages: func (m MConnection) Send(chID byte, msgBytes []byte) bool {} - func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {} `Send(chID, msgBytes)` is a blocking call that waits until `msg` is successfully queued for the channel with the given id byte `chID`, or until the request times out. The message `msg` is serialized using Protobuf. -`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the -channel's queue is full. - Inbound message bytes are handled with an onReceive callback function. */ type MConnection struct { @@ -85,8 +81,8 @@ type MConnection struct { recvMonitor *flow.Monitor send chan struct{} pong chan struct{} - channels []*Channel - channelsIdx map[byte]*Channel + channels []*channel + channelsIdx map[ChannelID]*channel onReceive receiveCbFunc onError errorCbFunc errored uint32 @@ -190,8 +186,8 @@ func NewMConnectionWithConfig( } // Create channels - var channelsIdx = map[byte]*Channel{} - var channels = []*Channel{} + var channelsIdx = map[ChannelID]*channel{} + var channels = []*channel{} for _, desc := range chDescs { channel := newChannel(mconn, *desc) @@ -265,43 +261,6 @@ func (c *MConnection) stopServices() (alreadyStopped bool) { return false } -// FlushStop replicates the logic of OnStop. -// It additionally ensures that all successful -// .Send() calls will get flushed before closing -// the connection. 
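The `//go:build` lines added throughout this diff are the constraint syntax introduced with Go 1.17 (which these workflows now target); the old `// +build` lines are retained so older toolchains still resolve the same constraint, and gofmt keeps the pair in sync. The shape, using the deadlock-detection file above as the example:

```go
//go:build !deadlock
// +build !deadlock

// Both constraint lines say the same thing: compile this file only when
// the "deadlock" build tag is absent.
package sync
```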
diff --git a/internal/p2p/conn/connection.go b/internal/p2p/conn/connection.go
index dc5bacc39..a99e83dc5 100644
--- a/internal/p2p/conn/connection.go
+++ b/internal/p2p/conn/connection.go
@@ -48,7 +48,7 @@ const (
 	defaultPongTimeout = 45 * time.Second
 )
 
-type receiveCbFunc func(chID byte, msgBytes []byte)
+type receiveCbFunc func(chID ChannelID, msgBytes []byte)
 type errorCbFunc func(interface{})
 
 /*
@@ -64,15 +64,11 @@ initialization of the connection.
 
 There are two methods for sending messages:
 	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
-	func (m MConnection) TrySend(chID byte, msgBytes []byte}) bool {}
 
 `Send(chID, msgBytes)` is a blocking call that waits until `msg` is
 successfully queued for the channel with the given id byte `chID`, or until the
 request times out. The message `msg` is serialized using Protobuf.
 
-`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
-channel's queue is full.
-
 Inbound message bytes are handled with an onReceive callback function.
 */
 type MConnection struct {
@@ -85,8 +81,8 @@ type MConnection struct {
 	recvMonitor *flow.Monitor
 	send        chan struct{}
 	pong        chan struct{}
-	channels    []*Channel
-	channelsIdx map[byte]*Channel
+	channels    []*channel
+	channelsIdx map[ChannelID]*channel
 	onReceive   receiveCbFunc
 	onError     errorCbFunc
 	errored     uint32
@@ -190,8 +186,8 @@ func NewMConnectionWithConfig(
 	}
 
 	// Create channels
-	var channelsIdx = map[byte]*Channel{}
-	var channels = []*Channel{}
+	var channelsIdx = map[ChannelID]*channel{}
+	var channels = []*channel{}
 
 	for _, desc := range chDescs {
 		channel := newChannel(mconn, *desc)
@@ -265,43 +261,6 @@ func (c *MConnection) stopServices() (alreadyStopped bool) {
 	return false
 }
 
-// FlushStop replicates the logic of OnStop.
-// It additionally ensures that all successful
-// .Send() calls will get flushed before closing
-// the connection.
-func (c *MConnection) FlushStop() {
-	if c.stopServices() {
-		return
-	}
-
-	// this block is unique to FlushStop
-	{
-		// wait until the sendRoutine exits
-		// so we dont race on calling sendSomePacketMsgs
-		<-c.doneSendRoutine
-
-		// Send and flush all pending msgs.
-		// Since sendRoutine has exited, we can call this
-		// safely
-		eof := c.sendSomePacketMsgs()
-		for !eof {
-			eof = c.sendSomePacketMsgs()
-		}
-		c.flush()
-
-		// Now we can close the connection
-	}
-
-	c.conn.Close()
-
-	// We can't close pong safely here because
-	// recvRoutine may write to it after we've stopped.
-	// Though it doesn't need to get closed at all,
-	// we close it @ recvRoutine.
-
-	// c.Stop()
-}
-
 // OnStop implements BaseService
 func (c *MConnection) OnStop() {
 	if c.stopServices() {
@@ -348,7 +307,7 @@ func (c *MConnection) stopForError(r interface{}) {
 }
 
 // Queues a message to be sent to channel.
-func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
+func (c *MConnection) Send(chID ChannelID, msgBytes []byte) bool {
 	if !c.IsRunning() {
 		return false
 	}
@@ -375,49 +334,6 @@ func (c *MConnection) Send(chID byte, msgBytes []byte) bool {
 	return success
 }
 
-// Queues a message to be sent to channel.
-// Nonblocking, returns true if successful.
-func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool {
-	if !c.IsRunning() {
-		return false
-	}
-
-	c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", msgBytes)
-
-	// Send message to channel.
-	channel, ok := c.channelsIdx[chID]
-	if !ok {
-		c.Logger.Error(fmt.Sprintf("Cannot send bytes, unknown channel %X", chID))
-		return false
-	}
-
-	ok = channel.trySendBytes(msgBytes)
-	if ok {
-		// Wake up sendRoutine if necessary
-		select {
-		case c.send <- struct{}{}:
-		default:
-		}
-	}
-
-	return ok
-}
-
-// CanSend returns true if you can send more data onto the chID, false
-// otherwise. Use only as a heuristic.
-func (c *MConnection) CanSend(chID byte) bool {
-	if !c.IsRunning() {
-		return false
-	}
-
-	channel, ok := c.channelsIdx[chID]
-	if !ok {
-		c.Logger.Error(fmt.Sprintf("Unknown channel %X", chID))
-		return false
-	}
-	return channel.canSend()
-}
-
 // sendRoutine polls for packets to send from channels.
 func (c *MConnection) sendRoutine() {
 	defer c._recover()
@@ -520,7 +436,7 @@ func (c *MConnection) sendPacketMsg() bool {
 	// Choose a channel to create a PacketMsg from.
 	// The chosen channel will be the one whose recentlySent/priority is the least.
 	var leastRatio float32 = math.MaxFloat32
-	var leastChannel *Channel
+	var leastChannel *channel
 	for _, channel := range c.channels {
 		// If nothing to send, skip this channel
 		if !channel.isSendPending() {
@@ -624,7 +540,7 @@ FOR_LOOP:
 				// never block
 			}
 		case *tmp2p.Packet_PacketMsg:
-			channelID := byte(pkt.PacketMsg.ChannelID)
+			channelID := ChannelID(pkt.PacketMsg.ChannelID)
 			channel, ok := c.channelsIdx[channelID]
 			if pkt.PacketMsg.ChannelID < 0 || pkt.PacketMsg.ChannelID > math.MaxUint8 || !ok || channel == nil {
 				err := fmt.Errorf("unknown channel %X", pkt.PacketMsg.ChannelID)
@@ -682,13 +598,6 @@ func (c *MConnection) maxPacketMsgSize() int {
 	return len(bz)
 }
 
-type ConnectionStatus struct {
-	Duration    time.Duration
-	SendMonitor flow.Status
-	RecvMonitor flow.Status
-	Channels    []ChannelStatus
-}
-
 type ChannelStatus struct {
 	ID                byte
 	SendQueueCapacity int
@@ -697,30 +606,16 @@ type ChannelStatus struct {
 	RecentlySent      int64
 }
 
-func (c *MConnection) Status() ConnectionStatus {
-	var status ConnectionStatus
-	status.Duration = time.Since(c.created)
-	status.SendMonitor = c.sendMonitor.Status()
-	status.RecvMonitor = c.recvMonitor.Status()
-	status.Channels = make([]ChannelStatus, len(c.channels))
-	for i, channel := range c.channels {
-		status.Channels[i] = ChannelStatus{
-			ID:                channel.desc.ID,
-			SendQueueCapacity: cap(channel.sendQueue),
-			SendQueueSize:     int(atomic.LoadInt32(&channel.sendQueueSize)),
-			Priority:          channel.desc.Priority,
-			RecentlySent:      atomic.LoadInt64(&channel.recentlySent),
-		}
-	}
-	return status
-}
-
 //-----------------------------------------------------------------------------
 
+// ChannelID is an arbitrary channel ID.
+type ChannelID uint16
+
 type ChannelDescriptor struct {
-	ID       byte
+	ID       ChannelID
 	Priority int
 
+	MessageType proto.Message
+
+	// TODO: Remove once p2p refactor is complete.
 	SendQueueCapacity   int
 	RecvMessageCapacity int
 
@@ -728,10 +623,6 @@ type ChannelDescriptor struct {
 	// RecvBufferCapacity defines the max buffer size of inbound messages for a
 	// given p2p Channel queue.
 	RecvBufferCapacity int
-
-	// MaxSendBytes defines the maximum number of bytes that can be sent at any
-	// given moment from a Channel to a peer.
-	MaxSendBytes uint
 }
 
 func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
@@ -748,28 +639,32 @@ func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) {
 	return
 }
 
-// TODO: lowercase.
 // NOTE: not goroutine-safe.
-type Channel struct {
+type channel struct {
+	// Exponential moving average.
+	// This field must be accessed atomically.
+	// It is first in the struct to ensure correct alignment.
+	// See https://github.com/tendermint/tendermint/issues/7000.
+	recentlySent int64
+
 	conn          *MConnection
 	desc          ChannelDescriptor
 	sendQueue     chan []byte
 	sendQueueSize int32 // atomic.
 	recving       []byte
 	sending       []byte
-	recentlySent  int64 // exponential moving average
 
 	maxPacketMsgPayloadSize int
 
 	Logger log.Logger
 }
 
-func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
+func newChannel(conn *MConnection, desc ChannelDescriptor) *channel {
 	desc = desc.FillDefaults()
 	if desc.Priority <= 0 {
 		panic("Channel default priority must be a positive integer")
 	}
-	return &Channel{
+	return &channel{
 		conn:      conn,
 		desc:      desc,
 		sendQueue: make(chan []byte, desc.SendQueueCapacity),
@@ -778,14 +673,14 @@ func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
 	}
 }
 
-func (ch *Channel) SetLogger(l log.Logger) {
+func (ch *channel) SetLogger(l log.Logger) {
 	ch.Logger = l
 }
 
 // Queues message to send to this channel.
 // Goroutine-safe
 // Times out (and returns false) after defaultSendTimeout
-func (ch *Channel) sendBytes(bytes []byte) bool {
+func (ch *channel) sendBytes(bytes []byte) bool {
 	select {
 	case ch.sendQueue <- bytes:
 		atomic.AddInt32(&ch.sendQueueSize, 1)
@@ -795,34 +690,10 @@ func (ch *Channel) sendBytes(bytes []byte) bool {
 	}
 }
 
-// Queues message to send to this channel.
-// Nonblocking, returns true if successful.
-// Goroutine-safe
-func (ch *Channel) trySendBytes(bytes []byte) bool {
-	select {
-	case ch.sendQueue <- bytes:
-		atomic.AddInt32(&ch.sendQueueSize, 1)
-		return true
-	default:
-		return false
-	}
-}
-
-// Goroutine-safe
-func (ch *Channel) loadSendQueueSize() (size int) {
-	return int(atomic.LoadInt32(&ch.sendQueueSize))
-}
-
-// Goroutine-safe
-// Use only as a heuristic.
-func (ch *Channel) canSend() bool {
-	return ch.loadSendQueueSize() < defaultSendQueueCapacity
-}
-
 // Returns true if any PacketMsgs are pending to be sent.
 // Call before calling nextPacketMsg()
 // Goroutine-safe
-func (ch *Channel) isSendPending() bool {
+func (ch *channel) isSendPending() bool {
 	if len(ch.sending) == 0 {
 		if len(ch.sendQueue) == 0 {
 			return false
@@ -834,7 +705,7 @@ func (ch *Channel) isSendPending() bool {
 
 // Creates a new PacketMsg to send.
 // Not goroutine-safe
-func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg {
+func (ch *channel) nextPacketMsg() tmp2p.PacketMsg {
 	packet := tmp2p.PacketMsg{ChannelID: int32(ch.desc.ID)}
 	maxSize := ch.maxPacketMsgPayloadSize
 	packet.Data = ch.sending[:tmmath.MinInt(maxSize, len(ch.sending))]
@@ -851,7 +722,7 @@ func (ch *Channel) nextPacketMsg() tmp2p.PacketMsg {
 
 // Writes next PacketMsg to w and updates c.recentlySent.
 // Not goroutine-safe
-func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) {
+func (ch *channel) writePacketMsgTo(w io.Writer) (n int, err error) {
 	packet := ch.nextPacketMsg()
 	n, err = protoio.NewDelimitedWriter(w).WriteMsg(mustWrapPacket(&packet))
 	atomic.AddInt64(&ch.recentlySent, int64(n))
@@ -861,7 +732,7 @@ func (ch *Channel) writePacketMsgTo(w io.Writer) (n int, err error) {
 // Handles incoming PacketMsgs. It returns a message bytes if message is
 // complete, which is owned by the caller and will not be modified.
 // Not goroutine-safe
-func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
+func (ch *channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
 	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
 	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Data)
 	if recvCap < recvReceived {
@@ -878,7 +749,7 @@ func (ch *Channel) recvPacketMsg(packet tmp2p.PacketMsg) ([]byte, error) {
 
 // Call this periodically to update stats for throttling purposes.
 // Not goroutine-safe
-func (ch *Channel) updateStats() {
+func (ch *channel) updateStats() {
 	// Exponential decay of stats.
 	// TODO: optimize.
 	atomic.StoreInt64(&ch.recentlySent, int64(float64(atomic.LoadInt64(&ch.recentlySent))*0.8))
diff --git a/internal/p2p/conn/connection_test.go b/internal/p2p/conn/connection_test.go
index 6d009f85c..1ed179ad8 100644
--- a/internal/p2p/conn/connection_test.go
+++ b/internal/p2p/conn/connection_test.go
@@ -20,7 +20,7 @@ import (
 const maxPingPongPacketSize = 1024 // bytes
 
 func createTestMConnection(conn net.Conn) *MConnection {
-	onReceive := func(chID byte, msgBytes []byte) {
+	onReceive := func(chID ChannelID, msgBytes []byte) {
 	}
 	onError := func(r interface{}) {
 	}
@@ -31,7 +31,7 @@ func createTestMConnection(conn net.Conn) *MConnection {
 
 func createMConnectionWithCallbacks(
 	conn net.Conn,
-	onReceive func(chID byte, msgBytes []byte),
+	onReceive func(chID ChannelID, msgBytes []byte),
 	onError func(r interface{}),
 ) *MConnection {
 	cfg := DefaultMConnConfig()
@@ -69,9 +69,6 @@ func TestMConnectionSendFlushStop(t *testing.T) {
 		errCh <- err
 	}()
 
-	// stop the conn - it should flush all conns
-	clientConn.FlushStop()
-
 	timer := time.NewTimer(3 * time.Second)
 	select {
 	case <-errCh:
@@ -97,16 +94,14 @@ func TestMConnectionSend(t *testing.T) {
 	if err != nil {
 		t.Error(err)
 	}
-	assert.True(t, mconn.CanSend(0x01))
 
 	msg = []byte("Spider-Man")
-	assert.True(t, mconn.TrySend(0x01, msg))
+	assert.True(t, mconn.Send(0x01, msg))
 	_, err = server.Read(make([]byte, len(msg)))
 	if err != nil {
 		t.Error(err)
 	}
 
-	assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown")
 	assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown")
 }
 
@@ -116,7 +111,7 @@ func TestMConnectionReceive(t *testing.T) {
 	receivedCh := make(chan []byte)
 	errorsCh := make(chan interface{})
-	onReceive := func(chID byte, msgBytes []byte) {
+	onReceive := func(chID ChannelID, msgBytes []byte) {
 		receivedCh <- msgBytes
 	}
 	onError := func(r interface{}) {
@@ -145,27 +140,13 @@ func TestMConnectionReceive(t *testing.T) {
 	}
 }
 
-func TestMConnectionStatus(t *testing.T) {
-	server, client := NetPipe()
-	t.Cleanup(closeAll(t, client, server))
-
-	mconn := createTestMConnection(client)
-	err := mconn.Start()
-	require.Nil(t, err)
-	t.Cleanup(stopAll(t, mconn))
-
-	status := mconn.Status()
-	assert.NotNil(t, status)
-	assert.Zero(t, status.Channels[0].SendQueueSize)
-}
-
 func TestMConnectionPongTimeoutResultsInError(t *testing.T) {
 	server, client := net.Pipe()
 	t.Cleanup(closeAll(t, client, server))
 
 	receivedCh := make(chan []byte)
 	errorsCh := make(chan interface{})
-	onReceive := func(chID byte, msgBytes []byte) {
+	onReceive := func(chID ChannelID, msgBytes []byte) {
 		receivedCh <- msgBytes
 	}
 	onError := func(r interface{}) {
@@ -203,7 +184,7 @@ func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) {
 	receivedCh := make(chan []byte)
 	errorsCh := make(chan interface{})
-	onReceive := func(chID byte, msgBytes []byte) {
+	onReceive := func(chID ChannelID, msgBytes []byte) {
 		receivedCh <- msgBytes
 	}
 	onError := func(r interface{}) {
@@ -257,7 +238,7 @@ func TestMConnectionMultiplePings(t *testing.T) {
 	receivedCh := make(chan []byte)
 	errorsCh := make(chan interface{})
-	onReceive := func(chID byte, msgBytes []byte) {
+	onReceive := func(chID ChannelID, msgBytes []byte) {
 		receivedCh <- msgBytes
 	}
 	onError := func(r interface{}) {
@@ -304,7 +285,7 @@ func TestMConnectionPingPongs(t *testing.T) {
 	receivedCh := make(chan []byte)
 	errorsCh := make(chan interface{})
-	onReceive := func(chID byte, msgBytes []byte) {
+	onReceive := func(chID ChannelID, msgBytes []byte) {
 		receivedCh <- msgBytes
 	}
 	onError := func(r interface{}) {
@@ -361,7 +342,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
 	receivedCh := make(chan []byte)
 	errorsCh := make(chan interface{})
-	onReceive := func(chID byte, msgBytes []byte) {
+	onReceive := func(chID ChannelID, msgBytes []byte) {
 		receivedCh <- msgBytes
 	}
 	onError := func(r interface{}) {
@@ -390,7 +371,7 @@ func TestMConnectionStopsAndReturnsError(t *testing.T) {
 func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) {
 	server, client := NetPipe()
 
-	onReceive := func(chID byte, msgBytes []byte) {}
+	onReceive := func(chID ChannelID, msgBytes []byte) {}
 	onError := func(r interface{}) {}
 
 	// create client conn with two channels
@@ -462,7 +443,7 @@ func TestMConnectionReadErrorLongMessage(t *testing.T) {
 	mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
 	t.Cleanup(stopAll(t, mconnClient, mconnServer))
 
-	mconnServer.onReceive = func(chID byte, msgBytes []byte) {
+	mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) {
 		chOnRcv <- struct{}{}
 	}
@@ -514,18 +495,15 @@ func TestMConnectionTrySend(t *testing.T) {
 	msg := []byte("Semicolon-Woman")
 	resultCh := make(chan string, 2)
-	assert.True(t, mconn.TrySend(0x01, msg))
+	assert.True(t, mconn.Send(0x01, msg))
 	_, err = server.Read(make([]byte, len(msg)))
 	require.NoError(t, err)
-	assert.True(t, mconn.CanSend(0x01))
-	assert.True(t, mconn.TrySend(0x01, msg))
-	assert.False(t, mconn.CanSend(0x01))
+	assert.True(t, mconn.Send(0x01, msg))
 	go func() {
-		mconn.TrySend(0x01, msg)
+		mconn.Send(0x01, msg)
 		resultCh <- "TrySend"
 	}()
-	assert.False(t, mconn.CanSend(0x01))
-	assert.False(t, mconn.TrySend(0x01, msg))
+	assert.False(t, mconn.Send(0x01, msg))
 	assert.Equal(t, "TrySend", <-resultCh)
 }
 
@@ -560,7 +538,7 @@ func TestMConnectionChannelOverflow(t *testing.T) {
 	mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr)
 	t.Cleanup(stopAll(t, mconnClient, mconnServer))
 
-	mconnServer.onReceive = func(chID byte, msgBytes []byte) {
+	mconnServer.onReceive = func(chID ChannelID, msgBytes []byte) {
 		chOnRcv <- struct{}{}
 	}
diff --git a/internal/p2p/conn/evil_secret_connection_test.go b/internal/p2p/conn/evil_secret_connection_test.go
index 6d8b7cbf7..05e88cd85 100644
--- a/internal/p2p/conn/evil_secret_connection_test.go
+++ b/internal/p2p/conn/evil_secret_connection_test.go
@@ -13,7 +13,7 @@ import (
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/internal/libs/protoio"
 	tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
 )
@@ -113,7 +113,7 @@ func (c *evilConn) Read(data []byte) (n int, err error) {
 	case 1:
 		signature := c.signChallenge()
 		if !c.badAuthSignature {
-			pkpb, err := cryptoenc.PubKeyToProto(c.privKey.PubKey())
+			pkpb, err := encoding.PubKeyToProto(c.privKey.PubKey())
 			if err != nil {
 				panic(err)
 			}
diff --git a/internal/p2p/conn/secret_connection.go b/internal/p2p/conn/secret_connection.go
index 2f0d269d6..35fac488a 100644
--- a/internal/p2p/conn/secret_connection.go
+++ b/internal/p2p/conn/secret_connection.go
@@ -23,7 +23,7 @@ import (
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/internal/libs/protoio"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/libs/async"
@@ -406,7 +406,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte
 	// Send our info and receive theirs in tandem.
 	var trs, _ = async.Parallel(
 		func(_ int) (val interface{}, abort bool, err error) {
-			pbpk, err := cryptoenc.PubKeyToProto(pubKey)
+			pbpk, err := encoding.PubKeyToProto(pubKey)
 			if err != nil {
 				return nil, true, err
 			}
@@ -423,7 +423,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte
 				return nil, true, err // abort
 			}
 
-			pk, err := cryptoenc.PubKeyFromProto(pba.PubKey)
+			pk, err := encoding.PubKeyFromProto(pba.PubKey)
 			if err != nil {
 				return nil, true, err // abort
 			}
"github.com/tendermint/tendermint/internal/libs/protoio" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/async" @@ -406,7 +406,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte // Send our info and receive theirs in tandem. var trs, _ = async.Parallel( func(_ int) (val interface{}, abort bool, err error) { - pbpk, err := cryptoenc.PubKeyToProto(pubKey) + pbpk, err := encoding.PubKeyToProto(pubKey) if err != nil { return nil, true, err } @@ -423,7 +423,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte return nil, true, err // abort } - pk, err := cryptoenc.PubKeyFromProto(pba.PubKey) + pk, err := encoding.PubKeyFromProto(pba.PubKey) if err != nil { return nil, true, err // abort } diff --git a/internal/p2p/conn/secret_connection_test.go b/internal/p2p/conn/secret_connection_test.go index 62587c0da..84384011b 100644 --- a/internal/p2p/conn/secret_connection_test.go +++ b/internal/p2p/conn/secret_connection_test.go @@ -195,7 +195,7 @@ func TestSecretConnectionReadWrite(t *testing.T) { compareWritesReads := func(writes []string, reads []string) { for { // Pop next write & corresponding reads - var read, write string = "", writes[0] + var read, write = "", writes[0] var readCount = 0 for _, readChunk := range reads { read += readChunk diff --git a/internal/p2p/conn_set.go b/internal/p2p/conn_set.go deleted file mode 100644 index 987d9f968..000000000 --- a/internal/p2p/conn_set.go +++ /dev/null @@ -1,82 +0,0 @@ -package p2p - -import ( - "net" - - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -// ConnSet is a lookup table for connections and all their ips. -type ConnSet interface { - Has(net.Conn) bool - HasIP(net.IP) bool - Set(net.Conn, []net.IP) - Remove(net.Conn) - RemoveAddr(net.Addr) -} - -type connSetItem struct { - conn net.Conn - ips []net.IP -} - -type connSet struct { - tmsync.RWMutex - - conns map[string]connSetItem -} - -// NewConnSet returns a ConnSet implementation. -func NewConnSet() ConnSet { - return &connSet{ - conns: map[string]connSetItem{}, - } -} - -func (cs *connSet) Has(c net.Conn) bool { - cs.RLock() - defer cs.RUnlock() - - _, ok := cs.conns[c.RemoteAddr().String()] - - return ok -} - -func (cs *connSet) HasIP(ip net.IP) bool { - cs.RLock() - defer cs.RUnlock() - - for _, c := range cs.conns { - for _, known := range c.ips { - if known.Equal(ip) { - return true - } - } - } - - return false -} - -func (cs *connSet) Remove(c net.Conn) { - cs.Lock() - defer cs.Unlock() - - delete(cs.conns, c.RemoteAddr().String()) -} - -func (cs *connSet) RemoveAddr(addr net.Addr) { - cs.Lock() - defer cs.Unlock() - - delete(cs.conns, addr.String()) -} - -func (cs *connSet) Set(c net.Conn, ips []net.IP) { - cs.Lock() - defer cs.Unlock() - - cs.conns[c.RemoteAddr().String()] = connSetItem{ - conn: c, - ips: ips, - } -} diff --git a/internal/p2p/errors.go b/internal/p2p/errors.go index 648f2cb3a..d4df28792 100644 --- a/internal/p2p/errors.go +++ b/internal/p2p/errors.go @@ -17,7 +17,7 @@ func (e ErrFilterTimeout) Error() string { // ErrRejected indicates that a Peer was rejected carrying additional // information as to the reason. type ErrRejected struct { - addr NetAddress + addr NodeAddress conn net.Conn err error id types.NodeID @@ -30,7 +30,7 @@ type ErrRejected struct { } // Addr returns the NetAddress for the rejected Peer. 
diff --git a/internal/p2p/mock/peer.go b/internal/p2p/mock/peer.go
deleted file mode 100644
index cede51768..000000000
--- a/internal/p2p/mock/peer.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package mock
-
-import (
-	"net"
-
-	"github.com/tendermint/tendermint/internal/p2p"
-	"github.com/tendermint/tendermint/internal/p2p/conn"
-	"github.com/tendermint/tendermint/libs/service"
-	"github.com/tendermint/tendermint/types"
-)
-
-type Peer struct {
-	*service.BaseService
-	ip                   net.IP
-	id                   types.NodeID
-	addr                 *p2p.NetAddress
-	kv                   map[string]interface{}
-	Outbound, Persistent bool
-}
-
-// NewPeer creates and starts a new mock peer. If the ip
-// is nil, random routable address is used.
-func NewPeer(ip net.IP) *Peer {
-	var netAddr *p2p.NetAddress
-	if ip == nil {
-		_, netAddr = p2p.CreateRoutableAddr()
-	} else {
-		netAddr = types.NewNetAddressIPPort(ip, 26656)
-	}
-	nodeKey := types.GenNodeKey()
-	netAddr.ID = nodeKey.ID
-	mp := &Peer{
-		ip:   ip,
-		id:   nodeKey.ID,
-		addr: netAddr,
-		kv:   make(map[string]interface{}),
-	}
-	mp.BaseService = service.NewBaseService(nil, "MockPeer", mp)
-	if err := mp.Start(); err != nil {
-		panic(err)
-	}
-	return mp
-}
-
-func (mp *Peer) FlushStop()                                { mp.Stop() } //nolint:errcheck //ignore error
-func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool   { return true }
-func (mp *Peer) Send(chID byte, msgBytes []byte) bool      { return true }
-func (mp *Peer) NodeInfo() types.NodeInfo {
-	return types.NodeInfo{
-		NodeID:     mp.addr.ID,
-		ListenAddr: mp.addr.DialString(),
-	}
-}
-func (mp *Peer) Status() conn.ConnectionStatus             { return conn.ConnectionStatus{} }
-func (mp *Peer) ID() types.NodeID                          { return mp.id }
-func (mp *Peer) IsOutbound() bool                          { return mp.Outbound }
-func (mp *Peer) IsPersistent() bool                        { return mp.Persistent }
-func (mp *Peer) Get(key string) interface{} {
-	if value, ok := mp.kv[key]; ok {
-		return value
-	}
-	return nil
-}
-func (mp *Peer) Set(key string, value interface{}) {
-	mp.kv[key] = value
-}
-func (mp *Peer) RemoteIP() net.IP                          { return mp.ip }
-func (mp *Peer) SocketAddr() *p2p.NetAddress               { return mp.addr }
-func (mp *Peer) RemoteAddr() net.Addr                      { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
-func (mp *Peer) CloseConn() error                          { return nil }
diff --git a/internal/p2p/mock/reactor.go b/internal/p2p/mock/reactor.go
deleted file mode 100644
index d634a8032..000000000
--- a/internal/p2p/mock/reactor.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package mock
-
-import (
-	"github.com/tendermint/tendermint/internal/p2p"
-	"github.com/tendermint/tendermint/internal/p2p/conn"
-	"github.com/tendermint/tendermint/libs/log"
-)
-
-type Reactor struct {
-	p2p.BaseReactor
-}
-
-func NewReactor() *Reactor {
-	r := &Reactor{}
-	r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r)
-	r.SetLogger(log.TestingLogger())
-	return r
-}
-
-func (r *Reactor) GetChannels() []*conn.ChannelDescriptor          { return []*conn.ChannelDescriptor{} }
-func (r *Reactor) AddPeer(peer p2p.Peer)                           {}
-func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{})    {}
-func (r *Reactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) {}
diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go
index 6c6174117..65b9afafb 100644
--- a/internal/p2p/mocks/connection.go
+++ b/internal/p2p/mocks/connection.go
@@ -35,20 +35,6 @@ func (_m *Connection) Close() error {
 	return r0
 }
 
-// FlushClose provides a mock function with given fields:
-func (_m *Connection) FlushClose() error {
-	ret := _m.Called()
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func() error); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
 // Handshake provides a mock function with given fields: _a0, _a1, _a2
 func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
 	ret := _m.Called(_a0, _a1, _a2)
@@ -94,14 +80,14 @@ func (_m *Connection) LocalEndpoint() p2p.Endpoint {
 }
 
 // ReceiveMessage provides a mock function with given fields:
-func (_m *Connection) ReceiveMessage() (p2p.ChannelID, []byte, error) {
+func (_m *Connection) ReceiveMessage() (conn.ChannelID, []byte, error) {
 	ret := _m.Called()
 
-	var r0 p2p.ChannelID
-	if rf, ok := ret.Get(0).(func() p2p.ChannelID); ok {
+	var r0 conn.ChannelID
+	if rf, ok := ret.Get(0).(func() conn.ChannelID); ok {
 		r0 = rf()
 	} else {
-		r0 = ret.Get(0).(p2p.ChannelID)
+		r0 = ret.Get(0).(conn.ChannelID)
 	}
 
 	var r1 []byte
@@ -138,35 +124,14 @@ func (_m *Connection) RemoteEndpoint() p2p.Endpoint {
 }
 
 // SendMessage provides a mock function with given fields: _a0, _a1
-func (_m *Connection) SendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) {
+func (_m *Connection) SendMessage(_a0 conn.ChannelID, _a1 []byte) error {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 bool
-	if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok {
+	var r0 error
+	if rf, ok := ret.Get(0).(func(conn.ChannelID, []byte) error); ok {
 		r0 = rf(_a0, _a1)
 	} else {
-		r0 = ret.Get(0).(bool)
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// Status provides a mock function with given fields:
-func (_m *Connection) Status() conn.ConnectionStatus {
-	ret := _m.Called()
-
-	var r0 conn.ConnectionStatus
-	if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(conn.ConnectionStatus)
+		r0 = ret.Error(0)
 	}
 
 	return r0
@@ -185,24 +150,3 @@ func (_m *Connection) String() string {
 
 	return r0
 }
-
-// TrySendMessage provides a mock function with given fields: _a0, _a1
-func (_m *Connection) TrySendMessage(_a0 p2p.ChannelID, _a1 []byte) (bool, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 bool
-	if rf, ok := ret.Get(0).(func(p2p.ChannelID, []byte) bool); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		r0 = ret.Get(0).(bool)
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(p2p.ChannelID, []byte) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
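With the regenerated mock, `SendMessage` now returns only an `error`. A minimal sketch of how such a mockery-generated mock is typically driven with testify's expectation API (test name and wiring are illustrative, not from this diff):

    package p2p_test

    import (
        "testing"

        "github.com/stretchr/testify/mock"
        "github.com/stretchr/testify/require"

        "github.com/tendermint/tendermint/internal/p2p/conn"
        "github.com/tendermint/tendermint/internal/p2p/mocks"
    )

    func TestMockConnectionSendMessage(t *testing.T) {
        c := &mocks.Connection{}

        // Stub the new single-return signature: SendMessage(ChannelID, []byte) error.
        c.On("SendMessage", conn.ChannelID(1), mock.Anything).Return(nil)

        require.NoError(t, c.SendMessage(conn.ChannelID(1), []byte("payload")))
        c.AssertExpectations(t)
    }
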
diff --git a/internal/p2p/mocks/peer.go b/internal/p2p/mocks/peer.go
deleted file mode 100644
index b905c1156..000000000
--- a/internal/p2p/mocks/peer.go
+++ /dev/null
@@ -1,334 +0,0 @@
-// Code generated by mockery. DO NOT EDIT.
-
-package mocks
-
-import (
-	conn "github.com/tendermint/tendermint/internal/p2p/conn"
-	log "github.com/tendermint/tendermint/libs/log"
-
-	mock "github.com/stretchr/testify/mock"
-
-	net "net"
-
-	types "github.com/tendermint/tendermint/types"
-)
-
-// Peer is an autogenerated mock type for the Peer type
-type Peer struct {
-	mock.Mock
-}
-
-// CloseConn provides a mock function with given fields:
-func (_m *Peer) CloseConn() error {
-	ret := _m.Called()
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func() error); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
-// FlushStop provides a mock function with given fields:
-func (_m *Peer) FlushStop() {
-	_m.Called()
-}
-
-// Get provides a mock function with given fields: _a0
-func (_m *Peer) Get(_a0 string) interface{} {
-	ret := _m.Called(_a0)
-
-	var r0 interface{}
-	if rf, ok := ret.Get(0).(func(string) interface{}); ok {
-		r0 = rf(_a0)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(interface{})
-		}
-	}
-
-	return r0
-}
-
-// ID provides a mock function with given fields:
-func (_m *Peer) ID() types.NodeID {
-	ret := _m.Called()
-
-	var r0 types.NodeID
-	if rf, ok := ret.Get(0).(func() types.NodeID); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(types.NodeID)
-	}
-
-	return r0
-}
-
-// IsOutbound provides a mock function with given fields:
-func (_m *Peer) IsOutbound() bool {
-	ret := _m.Called()
-
-	var r0 bool
-	if rf, ok := ret.Get(0).(func() bool); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(bool)
-	}
-
-	return r0
-}
-
-// IsPersistent provides a mock function with given fields:
-func (_m *Peer) IsPersistent() bool {
-	ret := _m.Called()
-
-	var r0 bool
-	if rf, ok := ret.Get(0).(func() bool); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(bool)
-	}
-
-	return r0
-}
-
-// IsRunning provides a mock function with given fields:
-func (_m *Peer) IsRunning() bool {
-	ret := _m.Called()
-
-	var r0 bool
-	if rf, ok := ret.Get(0).(func() bool); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(bool)
-	}
-
-	return r0
-}
-
-// NodeInfo provides a mock function with given fields:
-func (_m *Peer) NodeInfo() types.NodeInfo {
-	ret := _m.Called()
-
-	var r0 types.NodeInfo
-	if rf, ok := ret.Get(0).(func() types.NodeInfo); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(types.NodeInfo)
-	}
-
-	return r0
-}
-
-// OnReset provides a mock function with given fields:
-func (_m *Peer) OnReset() error {
-	ret := _m.Called()
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func() error); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
-// OnStart provides a mock function with given fields:
-func (_m *Peer) OnStart() error {
-	ret := _m.Called()
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func() error); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
-// OnStop provides a mock function with given fields:
-func (_m *Peer) OnStop() {
-	_m.Called()
-}
-
-// Quit provides a mock function with given fields:
-func (_m *Peer) Quit() <-chan struct{} {
-	ret := _m.Called()
-
-	var r0 <-chan struct{}
-	if rf, ok := ret.Get(0).(func() <-chan struct{}); ok {
-		r0 = rf()
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(<-chan struct{})
-		}
-	}
-
-	return r0
-}
-
-// RemoteAddr provides a mock function with given fields:
-func (_m *Peer) RemoteAddr() net.Addr {
-	ret := _m.Called()
-
-	var r0 net.Addr
-	if rf, ok := ret.Get(0).(func() net.Addr); ok {
-		r0 = rf()
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(net.Addr)
-		}
-	}
-
-	return r0
-}
-
-// RemoteIP provides a mock function with given fields:
-func (_m *Peer) RemoteIP() net.IP {
-	ret := _m.Called()
-
-	var r0 net.IP
-	if rf, ok := ret.Get(0).(func() net.IP); ok {
-		r0 = rf()
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(net.IP)
-		}
-	}
-
-	return r0
-}
-
-// Reset provides a mock function with given fields:
-func (_m *Peer) Reset() error {
-	ret := _m.Called()
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func() error); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
-// Send provides a mock function with given fields: _a0, _a1
-func (_m *Peer) Send(_a0 byte, _a1 []byte) bool {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 bool
-	if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		r0 = ret.Get(0).(bool)
-	}
-
-	return r0
-}
-
-// Set provides a mock function with given fields: _a0, _a1
-func (_m *Peer) Set(_a0 string, _a1 interface{}) {
-	_m.Called(_a0, _a1)
-}
-
-// SetLogger provides a mock function with given fields: _a0
-func (_m *Peer) SetLogger(_a0 log.Logger) {
-	_m.Called(_a0)
-}
-
-// SocketAddr provides a mock function with given fields:
-func (_m *Peer) SocketAddr() *types.NetAddress {
-	ret := _m.Called()
-
-	var r0 *types.NetAddress
-	if rf, ok := ret.Get(0).(func() *types.NetAddress); ok {
-		r0 = rf()
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.NetAddress)
-		}
-	}
-
-	return r0
-}
-
-// Start provides a mock function with given fields:
-func (_m *Peer) Start() error {
-	ret := _m.Called()
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func() error); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
-// Status provides a mock function with given fields:
-func (_m *Peer) Status() conn.ConnectionStatus {
-	ret := _m.Called()
-
-	var r0 conn.ConnectionStatus
-	if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(conn.ConnectionStatus)
-	}
-
-	return r0
-}
-
-// Stop provides a mock function with given fields:
-func (_m *Peer) Stop() error {
-	ret := _m.Called()
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func() error); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
-// String provides a mock function with given fields:
-func (_m *Peer) String() string {
-	ret := _m.Called()
-
-	var r0 string
-	if rf, ok := ret.Get(0).(func() string); ok {
-		r0 = rf()
-	} else {
-		r0 = ret.Get(0).(string)
-	}
-
-	return r0
-}
-
-// TrySend provides a mock function with given fields: _a0, _a1
-func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 bool
-	if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		r0 = ret.Get(0).(bool)
-	}
-
-	return r0
-}
-
-// Wait provides a mock function with given fields:
-func (_m *Peer) Wait() {
-	_m.Called()
-}
diff --git a/internal/p2p/mocks/transport.go b/internal/p2p/mocks/transport.go
index 82bd670cb..2fc7baa29 100644
--- a/internal/p2p/mocks/transport.go
+++ b/internal/p2p/mocks/transport.go
@@ -5,7 +5,10 @@ package mocks
 import (
 	context "context"
 
+	conn "github.com/tendermint/tendermint/internal/p2p/conn"
+
 	mock "github.com/stretchr/testify/mock"
+
 	p2p "github.com/tendermint/tendermint/internal/p2p"
 )
 
@@ -37,6 +40,11 @@ func (_m *Transport) Accept() (p2p.Connection, error) {
 	return r0, r1
 }
 
+// AddChannelDescriptors provides a mock function with given fields: _a0
+func (_m *Transport) AddChannelDescriptors(_a0 []*conn.ChannelDescriptor) {
+	_m.Called(_a0)
+}
+
 // Close provides a mock function with given fields:
 func (_m *Transport) Close() error {
 	ret := _m.Called()
diff --git a/internal/p2p/netaddress.go b/internal/p2p/netaddress.go
deleted file mode 100644
index 6fce3a769..000000000
--- a/internal/p2p/netaddress.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// Modified for Tendermint
-// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
-// https://github.com/conformal/btcd/blob/master/LICENSE
-
-package p2p
-
-import (
-	"github.com/tendermint/tendermint/types"
-)
-
-type NetAddress = types.NetAddress
diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go
index 6e524d492..642114a1d 100644
--- a/internal/p2p/p2p_test.go
+++ b/internal/p2p/p2p_test.go
@@ -6,6 +6,7 @@ import (
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
 	"github.com/tendermint/tendermint/internal/p2p"
+	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -14,12 +15,12 @@ import (
 var (
 	ctx    = context.Background()
 	chID   = p2p.ChannelID(1)
-	chDesc = p2p.ChannelDescriptor{
-		ID:                  byte(chID),
+	chDesc = &p2p.ChannelDescriptor{
+		ID:                  chID,
+		MessageType:         &p2ptest.Message{},
 		Priority:            5,
 		SendQueueCapacity:   10,
 		RecvMessageCapacity: 10,
-		MaxSendBytes:        1000,
 	}
 
 	selfKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd})
diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go
index 1daba3f14..c808ad3e0 100644
--- a/internal/p2p/p2ptest/network.go
+++ b/internal/p2p/p2ptest/network.go
@@ -6,7 +6,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/require"
 
 	dbm "github.com/tendermint/tm-db"
@@ -137,13 +136,11 @@ func (n *Network) NodeIDs() []types.NodeID {
 // doing error checks and cleanups.
 func (n *Network) MakeChannels(
 	t *testing.T,
-	chDesc p2p.ChannelDescriptor,
-	messageType proto.Message,
-	size int,
+	chDesc *p2p.ChannelDescriptor,
 ) map[types.NodeID]*p2p.Channel {
 	channels := map[types.NodeID]*p2p.Channel{}
 	for _, node := range n.Nodes {
-		channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size)
+		channels[node.NodeID] = node.MakeChannel(t, chDesc)
 	}
 	return channels
 }
@@ -153,13 +150,11 @@ func (n *Network) MakeChannels(
 // all the channels.
 func (n *Network) MakeChannelsNoCleanup(
 	t *testing.T,
-	chDesc p2p.ChannelDescriptor,
-	messageType proto.Message,
-	size int,
+	chDesc *p2p.ChannelDescriptor,
 ) map[types.NodeID]*p2p.Channel {
 	channels := map[types.NodeID]*p2p.Channel{}
 	for _, node := range n.Nodes {
-		channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size)
+		channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc)
 	}
 	return channels
 }
@@ -281,11 +276,13 @@ func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
 // MakeChannel opens a channel, with automatic error handling and cleanup. On
 // test cleanup, it also checks that the channel is empty, to make sure
 // all expected messages have been asserted.
-func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor,
-	messageType proto.Message, size int) *p2p.Channel {
-	channel, err := n.Router.OpenChannel(chDesc, messageType, size)
+func (n *Node) MakeChannel(
+	t *testing.T,
+	chDesc *p2p.ChannelDescriptor,
+) *p2p.Channel {
+	channel, err := n.Router.OpenChannel(chDesc)
 	require.NoError(t, err)
-	require.Contains(t, n.Router.NodeInfo().Channels, chDesc.ID)
+	require.Contains(t, n.Router.NodeInfo().Channels, byte(chDesc.ID))
 	t.Cleanup(func() {
 		RequireEmpty(t, channel)
 		channel.Close()
@@ -297,12 +294,9 @@ func (n *Node) MakeChannel(t *testing.T, chDesc p2p.ChannelDescriptor,
 // caller must ensure proper cleanup of the channel.
 func (n *Node) MakeChannelNoCleanup(
 	t *testing.T,
-	chDesc p2p.ChannelDescriptor,
-	messageType proto.Message,
-	size int,
+	chDesc *p2p.ChannelDescriptor,
 ) *p2p.Channel {
-
-	channel, err := n.Router.OpenChannel(chDesc, messageType, size)
+	channel, err := n.Router.OpenChannel(chDesc)
 	require.NoError(t, err)
 	return channel
 }
@@ -333,12 +327,12 @@ func (n *Node) MakePeerUpdatesNoRequireEmpty(t *testing.T) *p2p.PeerUpdates {
 	return sub
 }
 
-func MakeChannelDesc(chID p2p.ChannelID) p2p.ChannelDescriptor {
-	return p2p.ChannelDescriptor{
-		ID:                  byte(chID),
+func MakeChannelDesc(chID p2p.ChannelID) *p2p.ChannelDescriptor {
+	return &p2p.ChannelDescriptor{
+		ID:                  chID,
+		MessageType:         &Message{},
 		Priority:            5,
 		SendQueueCapacity:   10,
 		RecvMessageCapacity: 10,
-		MaxSendBytes:        1000,
 	}
 }
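Since the descriptor now carries its message type, a channel is opened from a single value instead of the old `(chDesc, messageType, size)` triple. A sketch mirroring `MakeChannelDesc` above (the helper name is hypothetical; `p2ptest.Message` is the test proto type used in this diff):

    // newTestChannelDesc builds a descriptor for channel ID 1; the MessageType
    // field replaces the messageType argument previously passed to OpenChannel.
    func newTestChannelDesc() *p2p.ChannelDescriptor {
        return &p2p.ChannelDescriptor{
            ID:                  p2p.ChannelID(1),
            MessageType:         &p2ptest.Message{},
            Priority:            5,
            SendQueueCapacity:   10,
            RecvMessageCapacity: 10,
        }
    }

    // Usage, as in Node.MakeChannel: channel, err := node.Router.OpenChannel(newTestChannelDesc())
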
diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go
index 3598baba0..a9fc16a34 100644
--- a/internal/p2p/p2ptest/require.go
+++ b/internal/p2p/p2ptest/require.go
@@ -24,6 +24,8 @@ func RequireEmpty(t *testing.T, channels ...*p2p.Channel) {
 
 // RequireReceive requires that the given envelope is received on the channel.
 func RequireReceive(t *testing.T, channel *p2p.Channel, expect p2p.Envelope) {
+	t.Helper()
+
 	timer := time.NewTimer(time.Second) // not time.After due to goroutine leaks
 	defer timer.Stop()
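The added `t.Helper()` marks `RequireReceive` as a test helper, so a failure inside it is reported at the caller's line rather than inside the helper. The general pattern, with a hypothetical helper for illustration:

    package p2ptest

    import "testing"

    // requirePositive fails the test at the *caller's* line, thanks to t.Helper.
    func requirePositive(t *testing.T, n int) {
        t.Helper()
        if n <= 0 {
            t.Fatalf("expected positive value, got %d", n)
        }
    }
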
diff --git a/internal/p2p/peer.go b/internal/p2p/peer.go
deleted file mode 100644
index 709a1294a..000000000
--- a/internal/p2p/peer.go
+++ /dev/null
@@ -1,371 +0,0 @@
-package p2p
-
-import (
-	"fmt"
-	"io"
-	"net"
-	"runtime/debug"
-	"time"
-
-	tmconn "github.com/tendermint/tendermint/internal/p2p/conn"
-	"github.com/tendermint/tendermint/libs/cmap"
-	"github.com/tendermint/tendermint/libs/log"
-	"github.com/tendermint/tendermint/libs/service"
-	"github.com/tendermint/tendermint/types"
-)
-
-//go:generate ../../scripts/mockery_generate.sh Peer
-
-const metricsTickerDuration = 10 * time.Second
-
-// Peer is an interface representing a peer connected on a reactor.
-type Peer interface {
-	service.Service
-	FlushStop()
-
-	ID() types.NodeID    // peer's cryptographic ID
-	RemoteIP() net.IP    // remote IP of the connection
-	RemoteAddr() net.Addr // remote address of the connection
-
-	IsOutbound() bool   // did we dial the peer
-	IsPersistent() bool // do we redial this peer when we disconnect
-
-	CloseConn() error // close original connection
-
-	NodeInfo() types.NodeInfo // peer's info
-	Status() tmconn.ConnectionStatus
-	SocketAddr() *NetAddress // actual address of the socket
-
-	Send(byte, []byte) bool
-	TrySend(byte, []byte) bool
-
-	Set(string, interface{})
-	Get(string) interface{}
-}
-
-//----------------------------------------------------------
-
-// peerConn contains the raw connection and its config.
-type peerConn struct {
-	outbound   bool
-	persistent bool
-	conn       Connection
-	ip         net.IP // cached RemoteIP()
-}
-
-func newPeerConn(outbound, persistent bool, conn Connection) peerConn {
-	return peerConn{
-		outbound:   outbound,
-		persistent: persistent,
-		conn:       conn,
-	}
-}
-
-// Return the IP from the connection RemoteAddr
-func (pc peerConn) RemoteIP() net.IP {
-	if pc.ip == nil {
-		pc.ip = pc.conn.RemoteEndpoint().IP
-	}
-	return pc.ip
-}
-
-// peer implements Peer.
-//
-// Before using a peer, you will need to perform a handshake on connection.
-type peer struct {
-	service.BaseService
-
-	// raw peerConn and the multiplex connection
-	peerConn
-
-	// peer's node info and the channel it knows about
-	// channels = nodeInfo.Channels
-	// cached to avoid copying nodeInfo in hasChannel
-	nodeInfo    types.NodeInfo
-	channels    []byte
-	reactors    map[byte]Reactor
-	onPeerError func(Peer, interface{})
-
-	// User data
-	Data *cmap.CMap
-
-	metrics       *Metrics
-	metricsTicker *time.Ticker
-}
-
-type PeerOption func(*peer)
-
-func newPeer(
-	nodeInfo types.NodeInfo,
-	pc peerConn,
-	reactorsByCh map[byte]Reactor,
-	onPeerError func(Peer, interface{}),
-	options ...PeerOption,
-) *peer {
-	p := &peer{
-		peerConn:      pc,
-		nodeInfo:      nodeInfo,
-		channels:      nodeInfo.Channels,
-		reactors:      reactorsByCh,
-		onPeerError:   onPeerError,
-		Data:          cmap.NewCMap(),
-		metricsTicker: time.NewTicker(metricsTickerDuration),
-		metrics:       NopMetrics(),
-	}
-
-	p.BaseService = *service.NewBaseService(nil, "Peer", p)
-	for _, option := range options {
-		option(p)
-	}
-
-	return p
-}
-
-// onError calls the peer error callback.
-func (p *peer) onError(err interface{}) {
-	p.onPeerError(p, err)
-}
-
-// String representation.
-func (p *peer) String() string {
-	if p.outbound {
-		return fmt.Sprintf("Peer{%v %v out}", p.conn, p.ID())
-	}
-
-	return fmt.Sprintf("Peer{%v %v in}", p.conn, p.ID())
-}
-
-//---------------------------------------------------
-// Implements service.Service
-
-// SetLogger implements BaseService.
-func (p *peer) SetLogger(l log.Logger) {
-	p.Logger = l
-}
-
-// OnStart implements BaseService.
-func (p *peer) OnStart() error {
-	if err := p.BaseService.OnStart(); err != nil {
-		return err
-	}
-
-	go p.processMessages()
-	go p.metricsReporter()
-
-	return nil
-}
-
-// processMessages processes messages received from the connection.
-func (p *peer) processMessages() {
-	defer func() {
-		if r := recover(); r != nil {
-			p.Logger.Error("peer message processing panic", "err", r, "stack", string(debug.Stack()))
-			p.onError(fmt.Errorf("panic during peer message processing: %v", r))
-		}
-	}()
-
-	for {
-		chID, msg, err := p.conn.ReceiveMessage()
-		if err != nil {
-			p.onError(err)
-			return
-		}
-		reactor, ok := p.reactors[byte(chID)]
-		if !ok {
-			p.onError(fmt.Errorf("unknown channel %v", chID))
-			return
-		}
-		reactor.Receive(byte(chID), p, msg)
-	}
-}
-
-// FlushStop mimics OnStop but additionally ensures that all successful
-// .Send() calls will get flushed before closing the connection.
-// NOTE: it is not safe to call this method more than once.
-func (p *peer) FlushStop() {
-	p.metricsTicker.Stop()
-	p.BaseService.OnStop()
-	if err := p.conn.FlushClose(); err != nil {
-		p.Logger.Debug("error while stopping peer", "err", err)
-	}
-}
-
-// OnStop implements BaseService.
-func (p *peer) OnStop() {
-	p.metricsTicker.Stop()
-	p.BaseService.OnStop()
-	if err := p.conn.Close(); err != nil {
-		p.Logger.Debug("error while stopping peer", "err", err)
-	}
-}
-
-//---------------------------------------------------
-// Implements Peer
-
-// ID returns the peer's ID - the hex encoded hash of its pubkey.
-func (p *peer) ID() types.NodeID {
-	return p.nodeInfo.ID()
-}
-
-// IsOutbound returns true if the connection is outbound, false otherwise.
-func (p *peer) IsOutbound() bool {
-	return p.peerConn.outbound
-}
-
-// IsPersistent returns true if the peer is persitent, false otherwise.
-func (p *peer) IsPersistent() bool {
-	return p.peerConn.persistent
-}
-
-// NodeInfo returns a copy of the peer's NodeInfo.
-func (p *peer) NodeInfo() types.NodeInfo {
-	return p.nodeInfo
-}
-
-// SocketAddr returns the address of the socket.
-// For outbound peers, it's the address dialed (after DNS resolution).
-// For inbound peers, it's the address returned by the underlying connection
-// (not what's reported in the peer's NodeInfo).
-func (p *peer) SocketAddr() *NetAddress {
-	endpoint := p.peerConn.conn.RemoteEndpoint()
-	return &NetAddress{
-		ID:   p.ID(),
-		IP:   endpoint.IP,
-		Port: endpoint.Port,
-	}
-}
-
-// Status returns the peer's ConnectionStatus.
-func (p *peer) Status() tmconn.ConnectionStatus {
-	return p.conn.Status()
-}
-
-// Send msg bytes to the channel identified by chID byte. Returns false if the
-// send queue is full after timeout, specified by MConnection.
-func (p *peer) Send(chID byte, msgBytes []byte) bool {
-	if !p.IsRunning() {
-		// see Switch#Broadcast, where we fetch the list of peers and loop over
-		// them - while we're looping, one peer may be removed and stopped.
-		return false
-	} else if !p.hasChannel(chID) {
-		return false
-	}
-	res, err := p.conn.SendMessage(ChannelID(chID), msgBytes)
-	if err == io.EOF {
-		return false
-	} else if err != nil {
-		p.onError(err)
-		return false
-	}
-	if res {
-		labels := []string{
-			"peer_id", string(p.ID()),
-			"chID", fmt.Sprintf("%#x", chID),
-		}
-		p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
-	}
-	return res
-}
-
-// TrySend msg bytes to the channel identified by chID byte. Immediately returns
-// false if the send queue is full.
-func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
-	if !p.IsRunning() {
-		return false
-	} else if !p.hasChannel(chID) {
-		return false
-	}
-	res, err := p.conn.TrySendMessage(ChannelID(chID), msgBytes)
-	if err == io.EOF {
-		return false
-	} else if err != nil {
-		p.onError(err)
-		return false
-	}
-	if res {
-		labels := []string{
-			"peer_id", string(p.ID()),
-			"chID", fmt.Sprintf("%#x", chID),
-		}
-		p.metrics.PeerSendBytesTotal.With(labels...).Add(float64(len(msgBytes)))
-	}
-	return res
-}
-
-// Get the data for a given key.
-func (p *peer) Get(key string) interface{} {
-	return p.Data.Get(key)
-}
-
-// Set sets the data for the given key.
-func (p *peer) Set(key string, data interface{}) {
-	p.Data.Set(key, data)
-}
-
-// hasChannel returns true if the peer reported
-// knowing about the given chID.
-func (p *peer) hasChannel(chID byte) bool {
-	for _, ch := range p.channels {
-		if ch == chID {
-			return true
-		}
-	}
-	// NOTE: probably will want to remove this
-	// but could be helpful while the feature is new
-	p.Logger.Debug(
-		"Unknown channel for peer",
-		"channel",
-		chID,
-		"channels",
-		p.channels,
-	)
-	return false
-}
-
-// CloseConn closes original connection. Used for cleaning up in cases where the peer had not been started at all.
-func (p *peer) CloseConn() error {
-	return p.peerConn.conn.Close()
-}
-
-//---------------------------------------------------
-// methods only used for testing
-// TODO: can we remove these?
-
-// CloseConn closes the underlying connection
-func (pc *peerConn) CloseConn() {
-	pc.conn.Close()
-}
-
-// RemoteAddr returns peer's remote network address.
-func (p *peer) RemoteAddr() net.Addr {
-	endpoint := p.conn.RemoteEndpoint()
-	return &net.TCPAddr{
-		IP:   endpoint.IP,
-		Port: int(endpoint.Port),
-	}
-}
-
-//---------------------------------------------------
-
-func PeerMetrics(metrics *Metrics) PeerOption {
-	return func(p *peer) {
-		p.metrics = metrics
-	}
-}
-
-func (p *peer) metricsReporter() {
-	for {
-		select {
-		case <-p.metricsTicker.C:
-			status := p.conn.Status()
-			var sendQueueSize float64
-			for _, chStatus := range status.Channels {
-				sendQueueSize += float64(chStatus.SendQueueSize)
-			}
-
-			p.metrics.PeerPendingSendBytes.With("peer_id", string(p.ID())).Set(sendQueueSize)
-		case <-p.Quit():
-			return
-		}
-	}
-}
diff --git a/internal/p2p/peer_set.go b/internal/p2p/peer_set.go
deleted file mode 100644
index 8d4ad4939..000000000
--- a/internal/p2p/peer_set.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package p2p
-
-import (
-	"net"
-
-	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
-	"github.com/tendermint/tendermint/types"
-)
-
-// IPeerSet has a (immutable) subset of the methods of PeerSet.
-type IPeerSet interface {
-	Has(key types.NodeID) bool
-	HasIP(ip net.IP) bool
-	Get(key types.NodeID) Peer
-	List() []Peer
-	Size() int
-}
-
-//-----------------------------------------------------------------------------
-
-// PeerSet is a special structure for keeping a table of peers.
-// Iteration over the peers is super fast and thread-safe.
-type PeerSet struct {
-	mtx    tmsync.Mutex
-	lookup map[types.NodeID]*peerSetItem
-	list   []Peer
-}
-
-type peerSetItem struct {
-	peer  Peer
-	index int
-}
-
-// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items.
-func NewPeerSet() *PeerSet {
-	return &PeerSet{
-		lookup: make(map[types.NodeID]*peerSetItem),
-		list:   make([]Peer, 0, 256),
-	}
-}
-
-// Add adds the peer to the PeerSet.
-// It returns an error carrying the reason, if the peer is already present.
-func (ps *PeerSet) Add(peer Peer) error {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-
-	if ps.lookup[peer.ID()] != nil {
-		return ErrSwitchDuplicatePeerID{peer.ID()}
-	}
-
-	index := len(ps.list)
-	// Appending is safe even with other goroutines
-	// iterating over the ps.list slice.
-	ps.list = append(ps.list, peer)
-	ps.lookup[peer.ID()] = &peerSetItem{peer, index}
-	return nil
-}
-
-// Has returns true if the set contains the peer referred to by this
-// peerKey, otherwise false.
-func (ps *PeerSet) Has(peerKey types.NodeID) bool {
-	ps.mtx.Lock()
-	_, ok := ps.lookup[peerKey]
-	ps.mtx.Unlock()
-	return ok
-}
-
-// HasIP returns true if the set contains the peer referred to by this IP
-// address, otherwise false.
-func (ps *PeerSet) HasIP(peerIP net.IP) bool {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-
-	return ps.hasIP(peerIP)
-}
-
-// hasIP does not acquire a lock so it can be used in public methods which
-// already lock.
-func (ps *PeerSet) hasIP(peerIP net.IP) bool {
-	for _, item := range ps.lookup {
-		if item.peer.RemoteIP().Equal(peerIP) {
-			return true
-		}
-	}
-
-	return false
-}
-
-// Get looks up a peer by the provided peerKey. Returns nil if peer is not
-// found.
-func (ps *PeerSet) Get(peerKey types.NodeID) Peer {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-	item, ok := ps.lookup[peerKey]
-	if ok {
-		return item.peer
-	}
-	return nil
-}
-
-// Remove discards peer by its Key, if the peer was previously memoized.
-// Returns true if the peer was removed, and false if it was not found.
-// in the set.
-func (ps *PeerSet) Remove(peer Peer) bool {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-
-	item := ps.lookup[peer.ID()]
-	if item == nil {
-		return false
-	}
-
-	index := item.index
-	// Create a new copy of the list but with one less item.
-	// (we must copy because we'll be mutating the list).
-	newList := make([]Peer, len(ps.list)-1)
-	copy(newList, ps.list)
-	// If it's the last peer, that's an easy special case.
-	if index == len(ps.list)-1 {
-		ps.list = newList
-		delete(ps.lookup, peer.ID())
-		return true
-	}
-
-	// Replace the popped item with the last item in the old list.
-	lastPeer := ps.list[len(ps.list)-1]
-	lastPeerKey := lastPeer.ID()
-	lastPeerItem := ps.lookup[lastPeerKey]
-	newList[index] = lastPeer
-	lastPeerItem.index = index
-	ps.list = newList
-	delete(ps.lookup, peer.ID())
-	return true
-}
-
-// Size returns the number of unique items in the peerSet.
-func (ps *PeerSet) Size() int {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-	return len(ps.list)
-}
-
-// List returns the threadsafe list of peers.
-func (ps *PeerSet) List() []Peer {
-	ps.mtx.Lock()
-	defer ps.mtx.Unlock()
-	return ps.list
-}
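The deleted `PeerSet.Remove` uses the classic map-plus-slice swap-remove technique: move the last element into the vacated slot and shrink the slice, so removal is O(1) while `List` stays a flat slice. A generic sketch of the same idea (hypothetical names; the original also copied the slice before mutating so concurrent readers of an old `List()` result stay safe):

    package main

    import "fmt"

    type set struct {
        index map[string]int // value -> position in items
        items []string
    }

    func newSet() *set { return &set{index: map[string]int{}} }

    func (s *set) add(v string) {
        if _, ok := s.index[v]; ok {
            return
        }
        s.index[v] = len(s.items)
        s.items = append(s.items, v)
    }

    // remove swaps the last item into the vacated slot: O(1) deletion.
    func (s *set) remove(v string) bool {
        i, ok := s.index[v]
        if !ok {
            return false
        }
        last := len(s.items) - 1
        s.items[i] = s.items[last]
        s.index[s.items[i]] = i
        s.items = s.items[:last]
        delete(s.index, v)
        return true
    }

    func main() {
        s := newSet()
        s.add("a")
        s.add("b")
        s.remove("a")
        fmt.Println(s.items) // [b]
    }
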
diff --git a/internal/p2p/peer_set_test.go b/internal/p2p/peer_set_test.go
deleted file mode 100644
index 3e2397d2d..000000000
--- a/internal/p2p/peer_set_test.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package p2p
-
-import (
-	"net"
-	"sync"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/tendermint/tendermint/libs/service"
-	"github.com/tendermint/tendermint/types"
-)
-
-// mockPeer for testing the PeerSet
-type mockPeer struct {
-	service.BaseService
-	ip net.IP
-	id types.NodeID
-}
-
-func (mp *mockPeer) FlushStop()                              { mp.Stop() } //nolint:errcheck // ignore error
-func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true }
-func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool    { return true }
-func (mp *mockPeer) NodeInfo() types.NodeInfo                { return types.NodeInfo{} }
-func (mp *mockPeer) Status() ConnectionStatus                { return ConnectionStatus{} }
-func (mp *mockPeer) ID() types.NodeID                        { return mp.id }
-func (mp *mockPeer) IsOutbound() bool                        { return false }
-func (mp *mockPeer) IsPersistent() bool                      { return true }
-func (mp *mockPeer) Get(s string) interface{}                { return s }
-func (mp *mockPeer) Set(string, interface{})                 {}
-func (mp *mockPeer) RemoteIP() net.IP                        { return mp.ip }
-func (mp *mockPeer) SocketAddr() *NetAddress                 { return nil }
-func (mp *mockPeer) RemoteAddr() net.Addr                    { return &net.TCPAddr{IP: mp.ip, Port: 8800} }
-func (mp *mockPeer) CloseConn() error                        { return nil }
-
-// Returns a mock peer
-func newMockPeer(ip net.IP) *mockPeer {
-	if ip == nil {
-		ip = net.IP{127, 0, 0, 1}
-	}
-	nodeKey := types.GenNodeKey()
-	return &mockPeer{
-		ip: ip,
-		id: nodeKey.ID,
-	}
-}
-
-func TestPeerSetAddRemoveOne(t *testing.T) {
-	t.Parallel()
-
-	peerSet := NewPeerSet()
-
-	var peerList []Peer
-	for i := 0; i < 5; i++ {
-		p := newMockPeer(net.IP{127, 0, 0, byte(i)})
-		if err := peerSet.Add(p); err != nil {
-			t.Error(err)
-		}
-		peerList = append(peerList, p)
-	}
-
-	n := len(peerList)
-	// 1. Test removing from the front
-	for i, peerAtFront := range peerList {
-		removed := peerSet.Remove(peerAtFront)
-		assert.True(t, removed)
-		wantSize := n - i - 1
-		for j := 0; j < 2; j++ {
-			assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j)
-			assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j)
-			// Test the route of removing the now non-existent element
-			removed := peerSet.Remove(peerAtFront)
-			assert.False(t, removed)
-		}
-	}
-
-	// 2. Next we are testing removing the peer at the end
-	// a) Replenish the peerSet
-	for _, peer := range peerList {
-		if err := peerSet.Add(peer); err != nil {
-			t.Error(err)
-		}
-	}
-
-	// b) In reverse, remove each element
-	for i := n - 1; i >= 0; i-- {
-		peerAtEnd := peerList[i]
-		removed := peerSet.Remove(peerAtEnd)
-		assert.True(t, removed)
-		assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i)
-		assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i)
-	}
-}
-
-func TestPeerSetAddRemoveMany(t *testing.T) {
-	t.Parallel()
-	peerSet := NewPeerSet()
-
-	peers := []Peer{}
-	N := 100
-	for i := 0; i < N; i++ {
-		peer := newMockPeer(net.IP{127, 0, 0, byte(i)})
-		if err := peerSet.Add(peer); err != nil {
-			t.Errorf("failed to add new peer")
-		}
-		if peerSet.Size() != i+1 {
-			t.Errorf("failed to add new peer and increment size")
-		}
-		peers = append(peers, peer)
-	}
-
-	for i, peer := range peers {
-		removed := peerSet.Remove(peer)
-		assert.True(t, removed)
-		if peerSet.Has(peer.ID()) {
-			t.Errorf("failed to remove peer")
-		}
-		if peerSet.Size() != len(peers)-i-1 {
-			t.Errorf("failed to remove peer and decrement size")
-		}
-	}
-}
-
-func TestPeerSetAddDuplicate(t *testing.T) {
-	t.Parallel()
-	peerSet := NewPeerSet()
-	peer := newMockPeer(nil)
-
-	n := 20
-	errsChan := make(chan error)
-	// Add the same asynchronously to test the
-	// concurrent guarantees of our APIs, and
-	// our expectation in the end is that only
-	// one addition succeeded, but the rest are
-	// instances of ErrSwitchDuplicatePeer.
-	for i := 0; i < n; i++ {
-		go func() {
-			errsChan <- peerSet.Add(peer)
-		}()
-	}
-
-	// Now collect and tally the results
-	errsTally := make(map[string]int)
-	for i := 0; i < n; i++ {
-		err := <-errsChan
-
-		switch err.(type) {
-		case ErrSwitchDuplicatePeerID:
-			errsTally["duplicateID"]++
-		default:
-			errsTally["other"]++
-		}
-	}
-
-	// Our next procedure is to ensure that only one addition
-	// succeeded and that the rest are each ErrSwitchDuplicatePeer.
-	wantErrCount, gotErrCount := n-1, errsTally["duplicateID"]
-	assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count")
-
-	wantNilErrCount, gotNilErrCount := 1, errsTally["other"]
-	assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount")
-}
-
-func TestPeerSetGet(t *testing.T) {
-	t.Parallel()
-
-	var (
-		peerSet = NewPeerSet()
-		peer    = newMockPeer(nil)
-	)
-
-	assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add")
-
-	if err := peerSet.Add(peer); err != nil {
-		t.Fatalf("Failed to add new peer: %v", err)
-	}
-
-	var wg sync.WaitGroup
-	for i := 0; i < 10; i++ {
-		// Add them asynchronously to test the
-		// concurrent guarantees of our APIs.
-		wg.Add(1)
-		go func(i int) {
-			defer wg.Done()
-			have, want := peerSet.Get(peer.ID()), peer
-			assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want)
-		}(i)
-	}
-	wg.Wait()
-}
fmt.Errorf("error creating peer: %w", err) - } - - pc, err = testPeerConn(transport, conn, true, persistent) - if err != nil { - if cerr := conn.Close(); cerr != nil { - return pc, fmt.Errorf("%v: %w", cerr.Error(), err) - } - return pc, err - } - - return pc, nil -} - -type remotePeer struct { - PrivKey crypto.PrivKey - Config *config.P2PConfig - Network string - addr *NetAddress - channels bytes.HexBytes - listenAddr string - listener net.Listener -} - -func (rp *remotePeer) Addr() *NetAddress { - return rp.addr -} - -func (rp *remotePeer) ID() types.NodeID { - return types.NodeIDFromPubKey(rp.PrivKey.PubKey()) -} - -func (rp *remotePeer) Start() { - if rp.listenAddr == "" { - rp.listenAddr = "127.0.0.1:0" - } - - l, e := net.Listen("tcp", rp.listenAddr) // any available address - if e != nil { - golog.Fatalf("net.Listen tcp :0: %+v", e) - } - rp.listener = l - rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr()) - if rp.channels == nil { - rp.channels = []byte{testCh} - } - go rp.accept() -} - -func (rp *remotePeer) Stop() { - rp.listener.Close() -} - -func (rp *remotePeer) Dial(addr *NetAddress) (net.Conn, error) { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conn, err := addr.DialTimeout(1 * time.Second) - if err != nil { - return nil, err - } - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - return nil, err - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - return nil, err - } - return conn, err -} - -func (rp *remotePeer) accept() { - transport := NewMConnTransport(log.TestingLogger(), MConnConfig(rp.Config), - []*ChannelDescriptor{}, MConnTransportOptions{}) - conns := []net.Conn{} - - for { - conn, err := rp.listener.Accept() - if err != nil { - golog.Printf("Failed to accept conn: %+v", err) - for _, conn := range conns { - _ = conn.Close() - } - return - } - - pc, err := testInboundPeerConn(transport, conn) - if err != nil { - golog.Printf("Failed to create a peer: %+v", err) - } - _, _, err = pc.conn.Handshake(context.Background(), rp.nodeInfo(), rp.PrivKey) - if err != nil { - golog.Printf("Failed to handshake a peer: %+v", err) - } - - conns = append(conns, conn) - } -} - -func (rp *remotePeer) nodeInfo() types.NodeInfo { - ni := types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: rp.Addr().ID, - ListenAddr: rp.listener.Addr().String(), - Network: "testing", - Version: "1.2.3-rc0-deadbeef", - Channels: rp.channels, - Moniker: "remote_peer", - } - if rp.Network != "" { - ni.Network = rp.Network - } - return ni -} diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 1e9afb38b..7ccc0d59c 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -180,7 +180,7 @@ func (o *PeerManagerOptions) Validate() error { if o.MaxPeers > 0 { if o.MaxConnected == 0 || o.MaxConnected+o.MaxConnectedUpgrade > o.MaxPeers { - return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", // nolint + return fmt.Errorf("MaxConnected %v and MaxConnectedUpgrade %v can't exceed MaxPeers %v", o.MaxConnected, o.MaxConnectedUpgrade, o.MaxPeers) } } @@ -190,7 +190,7 @@ func (o *PeerManagerOptions) Validate() error { return errors.New("can't set MaxRetryTime without MinRetryTime") } if o.MinRetryTime > o.MaxRetryTime { - return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTime %v", // nolint + return fmt.Errorf("MinRetryTime %v 
is greater than MaxRetryTime %v", o.MinRetryTime, o.MaxRetryTime) } } @@ -200,7 +200,7 @@ func (o *PeerManagerOptions) Validate() error { return errors.New("can't set MaxRetryTimePersistent without MinRetryTime") } if o.MinRetryTime > o.MaxRetryTimePersistent { - return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", // nolint + return fmt.Errorf("MinRetryTime %v is greater than MaxRetryTimePersistent %v", o.MinRetryTime, o.MaxRetryTimePersistent) } } diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index 0825af948..edb5fc6fc 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -6,9 +6,10 @@ import ( "time" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestPeerScoring(t *testing.T) { diff --git a/internal/p2p/pex/addrbook.go b/internal/p2p/pex/addrbook.go deleted file mode 100644 index 6c5f78663..000000000 --- a/internal/p2p/pex/addrbook.go +++ /dev/null @@ -1,948 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. -// https://github.com/conformal/btcd/blob/master/LICENSE - -package pex - -import ( - "encoding/binary" - "fmt" - "hash" - "math" - mrand "math/rand" - "net" - "sync" - "time" - - "github.com/minio/highwayhash" - "github.com/tendermint/tendermint/crypto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" -) - -const ( - bucketTypeNew = 0x01 - bucketTypeOld = 0x02 -) - -// AddrBook is an address book used for tracking peers -// so we can gossip about them to others and select -// peers to dial. -// TODO: break this up? -type AddrBook interface { - service.Service - - // Add our own addresses so we don't later add ourselves - AddOurAddress(*p2p.NetAddress) - // Check if it is our address - OurAddress(*p2p.NetAddress) bool - - AddPrivateIDs([]string) - - // Add and remove an address - AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error - RemoveAddress(*p2p.NetAddress) - - // Check if the address is in the book - HasAddress(*p2p.NetAddress) bool - - // Do we need more peers? - NeedMoreAddrs() bool - // Is Address Book Empty? Answer should not depend on being in your own - // address book, or private peers - Empty() bool - - // Pick an address to dial - PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress - - // Mark address - MarkGood(types.NodeID) - MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list - // Add bad peers back to addrBook - ReinstateBadPeers() - - IsGood(*p2p.NetAddress) bool - IsBanned(*p2p.NetAddress) bool - - // Send a selection of addresses to peers - GetSelection() []*p2p.NetAddress - // Send a selection of addresses with bias - GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress - - Size() int - - // Persist to disk - Save() -} - -var _ AddrBook = (*addrBook)(nil) - -// addrBook - concurrency safe peer address manager. -// Implements AddrBook. 
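The AddrBook interface above is the whole public surface of the old address book. Reconstructed from the tests later in this diff, a typical call sequence looked roughly like the sketch below; since the package lives under internal/, treat it as an illustration of the removed API, not importable client code.

```go
// Sketch only: identifiers mirror the deleted pex package; logger and
// the two NetAddress values are assumed to come from the caller.
func useAddrBook(logger log.Logger, addr, src *p2p.NetAddress) error {
	book := pex.NewAddrBook("addrbook.json", true) // true = routabilityStrict
	book.SetLogger(logger)
	if err := book.Start(); err != nil { // loads the file, starts the save routine
		return err
	}
	defer book.Save()

	// AddAddress rejects non-routable, private, banned, and self addresses.
	if err := book.AddAddress(addr, src); err != nil {
		return err
	}
	if book.NeedMoreAddrs() {
		if next := book.PickAddress(30); next != nil { // 30% bias toward "new" buckets
			logger.Info("candidate to dial", "addr", next)
		}
	}
	return nil
}
```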
-type addrBook struct { - service.BaseService - - // accessed concurrently - mtx tmsync.Mutex - ourAddrs map[string]struct{} - privateIDs map[types.NodeID]struct{} - addrLookup map[types.NodeID]*knownAddress // new & old - badPeers map[types.NodeID]*knownAddress // blacklisted peers - bucketsOld []map[string]*knownAddress - bucketsNew []map[string]*knownAddress - nOld int - nNew int - - // immutable after creation - filePath string - key string // random prefix for bucket placement - routabilityStrict bool - hasher hash.Hash64 - - wg sync.WaitGroup -} - -func mustNewHasher() hash.Hash64 { - key := crypto.CRandBytes(highwayhash.Size) - hasher, err := highwayhash.New64(key) - if err != nil { - panic(err) - } - return hasher -} - -// NewAddrBook creates a new address book. -// Use Start to begin processing asynchronous address updates. -func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { - am := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: filePath, - routabilityStrict: routabilityStrict, - } - am.init() - am.BaseService = *service.NewBaseService(nil, "AddrBook", am) - return am -} - -// Initialize the buckets. -// When modifying this, don't forget to update loadFromFile() -func (a *addrBook) init() { - a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits - // New addr buckets - a.bucketsNew = make([]map[string]*knownAddress, newBucketCount) - for i := range a.bucketsNew { - a.bucketsNew[i] = make(map[string]*knownAddress) - } - // Old addr buckets - a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount) - for i := range a.bucketsOld { - a.bucketsOld[i] = make(map[string]*knownAddress) - } - a.hasher = mustNewHasher() -} - -// OnStart implements Service. -func (a *addrBook) OnStart() error { - if err := a.BaseService.OnStart(); err != nil { - return err - } - a.loadFromFile(a.filePath) - - // wg.Add to ensure that any invocation of .Wait() - // later on will wait for saveRoutine to terminate. - a.wg.Add(1) - go a.saveRoutine() - - return nil -} - -// OnStop implements Service. -func (a *addrBook) OnStop() { - a.BaseService.OnStop() -} - -func (a *addrBook) Wait() { - a.wg.Wait() -} - -func (a *addrBook) FilePath() string { - return a.filePath -} - -//------------------------------------------------------- - -// AddOurAddress one of our addresses. -func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.Logger.Info("Add our address to book", "addr", addr) - a.ourAddrs[addr.String()] = struct{}{} -} - -// OurAddress returns true if it is our address. -func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - _, ok := a.ourAddrs[addr.String()] - return ok -} - -func (a *addrBook) AddPrivateIDs(ids []string) { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, id := range ids { - a.privateIDs[types.NodeID(id)] = struct{}{} - } -} - -// AddAddress implements AddrBook -// Add address to a "new" bucket. If it's already in one, only add it probabilistically. -// Returns error if the addr is non-routable. Does not add self. -// NOTE: addr must not be nil -func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.addAddress(addr, src) -} - -// RemoveAddress implements AddrBook - removes the address from the book. 
-func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.removeAddress(addr) -} - -// IsGood returns true if peer was ever marked as good and haven't -// done anything wrong since then. -func (a *addrBook) IsGood(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.addrLookup[addr.ID].isOld() -} - -// IsBanned returns true if the peer is currently banned -func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool { - a.mtx.Lock() - _, ok := a.badPeers[addr.ID] - a.mtx.Unlock() - - return ok -} - -// HasAddress returns true if the address is in the book. -func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[addr.ID] - return ka != nil -} - -// NeedMoreAddrs implements AddrBook - returns true if there are not have enough addresses in the book. -func (a *addrBook) NeedMoreAddrs() bool { - return a.Size() < needAddressThreshold -} - -// Empty implements AddrBook - returns true if there are no addresses in the address book. -// Does not count the peer appearing in its own address book, or private peers. -func (a *addrBook) Empty() bool { - return a.Size() == 0 -} - -// PickAddress implements AddrBook. It picks an address to connect to. -// The address is picked randomly from an old or new bucket according -// to the biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to that range) -// and determines how biased we are to pick an address from a new bucket. -// PickAddress returns nil if the AddrBook is empty or if we try to pick -// from an empty bucket. -// nolint:gosec // G404: Use of weak random number generator -func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - // Bias between new and old addresses. - oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs)) - newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs) - - // pick a random peer from a random bucket - var bucket map[string]*knownAddress - pickFromOldBucket := (newCorrelation+oldCorrelation)*mrand.Float64() < oldCorrelation - if (pickFromOldBucket && a.nOld == 0) || - (!pickFromOldBucket && a.nNew == 0) { - return nil - } - // loop until we pick a random non-empty bucket - for len(bucket) == 0 { - if pickFromOldBucket { - bucket = a.bucketsOld[mrand.Intn(len(a.bucketsOld))] - } else { - bucket = a.bucketsNew[mrand.Intn(len(a.bucketsNew))] - } - } - // pick a random index and loop over the map to return that index - randIndex := mrand.Intn(len(bucket)) - for _, ka := range bucket { - if randIndex == 0 { - return ka.Addr - } - randIndex-- - } - return nil -} - -// MarkGood implements AddrBook - it marks the peer as good and -// moves it into an "old" bucket. -func (a *addrBook) MarkGood(id types.NodeID) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[id] - if ka == nil { - return - } - ka.markGood() - if ka.isNew() { - if err := a.moveToOld(ka); err != nil { - a.Logger.Error("Error moving address to old", "err", err) - } - } -} - -// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address. 
-func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - ka.markAttempt() -} - -// MarkBad implements AddrBook. Kicks address out from book, places -// the address in the badPeers pool. -func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) { - a.mtx.Lock() - defer a.mtx.Unlock() - - if a.addBadPeer(addr, banTime) { - a.removeAddress(addr) - } -} - -// ReinstateBadPeers removes bad peers from ban list and places them into a new -// bucket. -func (a *addrBook) ReinstateBadPeers() { - a.mtx.Lock() - defer a.mtx.Unlock() - - for _, ka := range a.badPeers { - if ka.isBanned() { - continue - } - - bucket, err := a.calcNewBucket(ka.Addr, ka.Src) - if err != nil { - a.Logger.Error("Failed to calculate new bucket (bad peer won't be reinstantiated)", - "addr", ka.Addr, "err", err) - continue - } - - if err := a.addToNewBucket(ka, bucket); err != nil { - a.Logger.Error("Error adding peer to new bucket", "err", err) - } - delete(a.badPeers, ka.ID()) - - a.Logger.Info("Reinstated address", "addr", ka.Addr) - } -} - -// GetSelection implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -// Must never return a nil address. -func (a *addrBook) GetSelection() []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - - numAddresses := tmmath.MaxInt( - tmmath.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = tmmath.MinInt(maxGetSelection, numAddresses) - - // XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk, - // could we just select a random numAddresses of indexes? - allAddr := make([]*p2p.NetAddress, bookSize) - i := 0 - for _, ka := range a.addrLookup { - allAddr[i] = ka.Addr - i++ - } - - // Fisher-Yates shuffle the array. We only need to do the first - // `numAddresses' since we are throwing the rest. - for i := 0; i < numAddresses; i++ { - // pick a number between current index and the end - // nolint:gosec // G404: Use of weak random number generator - j := mrand.Intn(len(allAddr)-i) + i - allAddr[i], allAddr[j] = allAddr[j], allAddr[i] - } - - // slice off the limit we are willing to share. - return allAddr[:numAddresses] -} - -func percentageOfNum(p, n int) int { - return int(math.Round((float64(p) / float64(100)) * float64(n))) -} - -// GetSelectionWithBias implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -// Must never return a nil address. -// -// Each address is picked randomly from an old or new bucket according to the -// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to -// that range) and determines how biased we are to pick an address from a new -// bucket. 
-func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - panic(fmt.Sprintf("Addrbook size %d (new: %d + old: %d) is less than 0", a.nNew+a.nOld, a.nNew, a.nOld)) - } - return nil - } - - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - numAddresses := tmmath.MaxInt( - tmmath.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = tmmath.MinInt(maxGetSelection, numAddresses) - - // number of new addresses that, if possible, should be in the beginning of the selection - // if there are no enough old addrs, will choose new addr instead. - numRequiredNewAdd := tmmath.MaxInt(percentageOfNum(biasTowardsNewAddrs, numAddresses), numAddresses-a.nOld) - selection := a.randomPickAddresses(bucketTypeNew, numRequiredNewAdd) - selection = append(selection, a.randomPickAddresses(bucketTypeOld, numAddresses-len(selection))...) - return selection -} - -//------------------------------------------------ - -// Size returns the number of addresses in the book. -func (a *addrBook) Size() int { - a.mtx.Lock() - defer a.mtx.Unlock() - - return a.size() -} - -func (a *addrBook) size() int { - return a.nNew + a.nOld -} - -//---------------------------------------------------------- - -// Save persists the address book to disk. -func (a *addrBook) Save() { - a.saveToFile(a.filePath) // thread safe -} - -func (a *addrBook) saveRoutine() { - defer a.wg.Done() - - saveFileTicker := time.NewTicker(dumpAddressInterval) -out: - for { - select { - case <-saveFileTicker.C: - a.saveToFile(a.filePath) - case <-a.Quit(): - break out - } - } - saveFileTicker.Stop() - a.saveToFile(a.filePath) -} - -//---------------------------------------------------------- - -func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress { - switch bucketType { - case bucketTypeNew: - return a.bucketsNew[bucketIdx] - case bucketTypeOld: - return a.bucketsOld[bucketIdx] - default: - panic("Invalid bucket type") - } -} - -// Adds ka to new bucket. Returns false if it couldn't do it cuz buckets full. -// NOTE: currently it always returns true. -func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) error { - // Consistency check to ensure we don't add an already known address - if ka.isOld() { - return errAddrBookOldAddressNewBucket{ka.Addr, bucketIdx} - } - - addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeNew, bucketIdx) - - // Already exists? - if _, ok := bucket[addrStr]; ok { - return nil - } - - // Enforce max addresses. - if len(bucket) > newBucketSize { - a.Logger.Info("new bucket is full, expiring new") - a.expireNew(bucketIdx) - } - - // Add to bucket. - bucket[addrStr] = ka - // increment nNew if the peer doesnt already exist in a bucket - if ka.addBucketRef(bucketIdx) == 1 { - a.nNew++ - } - - // Add it to addrLookup - a.addrLookup[ka.ID()] = ka - return nil -} - -// Adds ka to old bucket. Returns false if it couldn't do it cuz buckets full. 
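The partition rule in GetSelectionWithBias above reduces to a small formula: the "new" share is bias% of the selection, enlarged when the book is short on old addresses. A sketch with concrete numbers (the real code then draws from the buckets, so final counts are also capped by what is actually available):

```go
package main

import (
	"fmt"
	"math"
)

// percentageOfNum is copied from the deleted file above.
func percentageOfNum(p, n int) int {
	return int(math.Round((float64(p) / float64(100)) * float64(n)))
}

// split applies the numRequiredNewAdd rule from GetSelectionWithBias.
func split(numAddresses, nOld, bias int) (numNew, numOld int) {
	numNew = percentageOfNum(bias, numAddresses)
	if short := numAddresses - nOld; short > numNew {
		numNew = short // not enough old addresses: take more new ones
	}
	return numNew, numAddresses - numNew
}

func main() {
	fmt.Println(split(32, 100, 30)) // 10 22 — the layout quoted in a test comment later in this diff
	fmt.Println(split(32, 5, 30))   // 27 5  — only five old addresses to draw on
}
```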
-func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool { - // Sanity check - if ka.isNew() { - a.Logger.Error(fmt.Sprintf("Cannot add new address to old bucket: %v", ka)) - return false - } - if len(ka.Buckets) != 0 { - a.Logger.Error(fmt.Sprintf("Cannot add already old address to another old bucket: %v", ka)) - return false - } - - addrStr := ka.Addr.String() - bucket := a.getBucket(bucketTypeOld, bucketIdx) - - // Already exists? - if _, ok := bucket[addrStr]; ok { - return true - } - - // Enforce max addresses. - if len(bucket) > oldBucketSize { - return false - } - - // Add to bucket. - bucket[addrStr] = ka - if ka.addBucketRef(bucketIdx) == 1 { - a.nOld++ - } - - // Ensure in addrLookup - a.addrLookup[ka.ID()] = ka - - return true -} - -func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { - if ka.BucketType != bucketType { - a.Logger.Error(fmt.Sprintf("Bucket type mismatch: %v", ka)) - return - } - bucket := a.getBucket(bucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - if ka.removeBucketRef(bucketIdx) == 0 { - if bucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) - } -} - -func (a *addrBook) removeFromAllBuckets(ka *knownAddress) { - for _, bucketIdx := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - } - ka.Buckets = nil - if ka.BucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) -} - -//---------------------------------------------------------- - -func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { - bucket := a.getBucket(bucketType, bucketIdx) - var oldest *knownAddress - for _, ka := range bucket { - if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) { - oldest = ka - } - } - return oldest -} - -// adds the address to a "new" bucket. if its already in one, -// it only adds it probabilistically -func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { - if addr == nil || src == nil { - return ErrAddrBookNilAddr{addr, src} - } - - if err := addr.Valid(); err != nil { - return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err} - } - - if _, ok := a.badPeers[addr.ID]; ok { - return ErrAddressBanned{addr} - } - - if _, ok := a.privateIDs[addr.ID]; ok { - return ErrAddrBookPrivate{addr} - } - - if _, ok := a.privateIDs[src.ID]; ok { - return ErrAddrBookPrivateSrc{src} - } - - // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both. - if _, ok := a.ourAddrs[addr.String()]; ok { - return ErrAddrBookSelf{addr} - } - - if a.routabilityStrict && !addr.Routable() { - return ErrAddrBookNonRoutable{addr} - } - - ka := a.addrLookup[addr.ID] - if ka != nil { - // If its already old and the address ID's are the same, ignore it. - // Thereby avoiding issues with a node on the network attempting to change - // the IP of a known node ID. (Which could yield an eclipse attack on the node) - if ka.isOld() && ka.Addr.ID == addr.ID { - return nil - } - // Already in max new buckets. - if len(ka.Buckets) == maxNewBucketsPerAddress { - return nil - } - // The more entries we have, the less likely we are to add more. 
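The dampening rule that the "less likely to add more" comment above describes (and that the factor computation immediately below implements) is easy to state on its own: an address already referenced by b new buckets survives a further add only with probability 1/(2b). A self-contained sketch of that rule:

```go
package main

import (
	"fmt"
	"math/rand"
)

// shouldAddAgain mirrors addAddress's probabilistic re-add: the more
// bucket references an address already has, the less likely it spreads.
func shouldAddAgain(existingBucketRefs int, r *rand.Rand) bool {
	if existingBucketRefs == 0 {
		return true // unknown address: always add
	}
	factor := int32(2 * existingBucketRefs)
	return r.Int31n(factor) == 0
}

func main() {
	r := rand.New(rand.NewSource(1))
	hits := 0
	for i := 0; i < 10000; i++ {
		if shouldAddAgain(2, r) { // already in two buckets: expect ~25%
			hits++
		}
	}
	fmt.Printf("re-added %.1f%% of the time\n", float64(hits)/100)
}
```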
- factor := int32(2 * len(ka.Buckets)) - // nolint:gosec // G404: Use of weak random number generator - if mrand.Int31n(factor) != 0 { - return nil - } - } else { - ka = newKnownAddress(addr, src) - } - - bucket, err := a.calcNewBucket(addr, src) - if err != nil { - return err - } - return a.addToNewBucket(ka, bucket) -} - -func (a *addrBook) randomPickAddresses(bucketType byte, num int) []*p2p.NetAddress { - var buckets []map[string]*knownAddress - switch bucketType { - case bucketTypeNew: - buckets = a.bucketsNew - case bucketTypeOld: - buckets = a.bucketsOld - default: - panic("unexpected bucketType") - } - total := 0 - for _, bucket := range buckets { - total += len(bucket) - } - addresses := make([]*knownAddress, 0, total) - for _, bucket := range buckets { - for _, ka := range bucket { - addresses = append(addresses, ka) - } - } - selection := make([]*p2p.NetAddress, 0, num) - chosenSet := make(map[string]bool, num) - rand := tmrand.NewRand() - rand.Shuffle(total, func(i, j int) { - addresses[i], addresses[j] = addresses[j], addresses[i] - }) - for _, addr := range addresses { - if chosenSet[addr.Addr.String()] { - continue - } - chosenSet[addr.Addr.String()] = true - selection = append(selection, addr.Addr) - if len(selection) >= num { - return selection - } - } - return selection -} - -// Make space in the new buckets by expiring the really bad entries. -// If no bad entries are available we remove the oldest. -func (a *addrBook) expireNew(bucketIdx int) { - for addrStr, ka := range a.bucketsNew[bucketIdx] { - // If an entry is bad, throw it away - if ka.isBad() { - a.Logger.Info(fmt.Sprintf("expiring bad address %v", addrStr)) - a.removeFromBucket(ka, bucketTypeNew, bucketIdx) - return - } - } - - // If we haven't thrown out a bad entry, throw out the oldest entry - oldest := a.pickOldest(bucketTypeNew, bucketIdx) - a.removeFromBucket(oldest, bucketTypeNew, bucketIdx) -} - -// Promotes an address from new to old. If the destination bucket is full, -// demote the oldest one to a "new" bucket. -// TODO: Demote more probabilistically? -func (a *addrBook) moveToOld(ka *knownAddress) error { - // Sanity check - if ka.isOld() { - a.Logger.Error(fmt.Sprintf("Cannot promote address that is already old %v", ka)) - return nil - } - if len(ka.Buckets) == 0 { - a.Logger.Error(fmt.Sprintf("Cannot promote address that isn't in any new buckets %v", ka)) - return nil - } - - // Remove from all (new) buckets. - a.removeFromAllBuckets(ka) - // It's officially old now. - ka.BucketType = bucketTypeOld - - // Try to add it to its oldBucket destination. - oldBucketIdx, err := a.calcOldBucket(ka.Addr) - if err != nil { - return err - } - added := a.addToOldBucket(ka, oldBucketIdx) - if !added { - // No room; move the oldest to a new bucket - oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) - a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - newBucketIdx, err := a.calcNewBucket(oldest.Addr, oldest.Src) - if err != nil { - return err - } - if err := a.addToNewBucket(oldest, newBucketIdx); err != nil { - a.Logger.Error("Error adding peer to old bucket", "err", err) - } - - // Finally, add our ka to old bucket again. 
- added = a.addToOldBucket(ka, oldBucketIdx) - if !added { - a.Logger.Error(fmt.Sprintf("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) - } - } - return nil -} - -func (a *addrBook) removeAddress(addr *p2p.NetAddress) { - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - a.Logger.Info("Remove address from book", "addr", addr) - a.removeFromAllBuckets(ka) -} - -func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool { - // check it exists in addrbook - ka := a.addrLookup[addr.ID] - // check address is not already there - if ka == nil { - return false - } - - if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer { - // add to bad peer list - ka.ban(banTime) - a.badPeers[addr.ID] = ka - a.Logger.Info("Add address to blacklist", "addr", addr) - } - return true -} - -//--------------------------------------------------------------------- -// calculate bucket placements - -// hash(key + sourcegroup + int64(hash(key + group + sourcegroup)) % bucket_per_group) % num_new_buckets -func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(a.groupKey(addr))...) - data1 = append(data1, []byte(a.groupKey(src))...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= newBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(src)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % newBucketCount) - return result, nil -} - -// hash(key + group + int64(hash(key + addr)) % buckets_per_group) % num_old_buckets -func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) (int, error) { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(addr.String())...) - hash1, err := a.hash(data1) - if err != nil { - return 0, err - } - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= oldBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(addr)...) - data2 = append(data2, hashbuf[:]...) - - hash2, err := a.hash(data2) - if err != nil { - return 0, err - } - result := int(binary.BigEndian.Uint64(hash2) % oldBucketCount) - return result, nil -} - -// Return a string representing the network group of this address. -// This is the /16 for IPv4 (e.g. 1.2.0.0), the /32 (/36 for he.net) for IPv6, the string -// "local" for a local address and the string "unroutable" for an unroutable -// address. 
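The calcNewBucket/calcOldBucket pair above composes two hashes, as their header comments spell out: an inner hash picks one of a handful of per-group slots, and an outer hash maps that slot to a concrete bucket, so addresses from one source group spread over a bounded set of buckets. A simplified sketch of the structure; the real code uses a keyed HighwayHash (see mustNewHasher above), and this substitutes FNV-1a purely for illustration, so only the shape matches, not the outputs. The constants are the ones from the params.go hunk near the end of this diff.

```go
package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

const (
	newBucketsPerGroup = 32  // params.go
	newBucketCount     = 256 // params.go
)

func h64(parts ...[]byte) uint64 {
	h := fnv.New64a()
	for _, p := range parts {
		h.Write(p)
	}
	return h.Sum64()
}

// calcNewBucket mimics hash(key + srcGroup + (hash(key + group + srcGroup)
// mod bucketsPerGroup)) mod newBucketCount from the deleted code.
func calcNewBucket(key, addrGroup, srcGroup string) int {
	inner := h64([]byte(key), []byte(addrGroup), []byte(srcGroup)) % newBucketsPerGroup
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], inner)
	outer := h64([]byte(key), []byte(srcGroup), buf[:])
	return int(outer % newBucketCount)
}

func main() {
	fmt.Println(calcNewBucket("random-key", "12.1.0.0", "173.1.0.0"))
}
```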
-func (a *addrBook) groupKey(na *p2p.NetAddress) string { - return groupKeyFor(na, a.routabilityStrict) -} - -func groupKeyFor(na *p2p.NetAddress, routabilityStrict bool) string { - if routabilityStrict && na.Local() { - return "local" - } - if routabilityStrict && !na.Routable() { - return "unroutable" - } - - if ipv4 := na.IP.To4(); ipv4 != nil { - return na.IP.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC6145() || na.RFC6052() { - // last four bytes are the ip address - ip := na.IP[12:16] - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC3964() { - ip := na.IP[2:6] - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.RFC4380() { - // teredo tunnels have the last 4 bytes as the v4 address XOR - // 0xff. - ip := net.IP(make([]byte, 4)) - for i, byte := range na.IP[12:16] { - ip[i] = byte ^ 0xff - } - return ip.Mask(net.CIDRMask(16, 32)).String() - } - - if na.OnionCatTor() { - // group is keyed off the first 4 bits of the actual onion key. - return fmt.Sprintf("tor:%d", na.IP[6]&((1<<4)-1)) - } - - // OK, so now we know ourselves to be a IPv6 address. - // bitcoind uses /32 for everything, except for Hurricane Electric's - // (he.net) IP range, which it uses /36 for. - bits := 32 - heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), Mask: net.CIDRMask(32, 128)} - if heNet.Contains(na.IP) { - bits = 36 - } - ipv6Mask := net.CIDRMask(bits, 128) - return na.IP.Mask(ipv6Mask).String() -} - -func (a *addrBook) hash(b []byte) ([]byte, error) { - a.hasher.Reset() - a.hasher.Write(b) - return a.hasher.Sum(nil), nil -} diff --git a/internal/p2p/pex/addrbook_test.go b/internal/p2p/pex/addrbook_test.go deleted file mode 100644 index 3d21314ad..000000000 --- a/internal/p2p/pex/addrbook_test.go +++ /dev/null @@ -1,777 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "math" - mrand "math/rand" - "net" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" -) - -// FIXME These tests should not rely on .(*addrBook) assertions - -func TestAddrBookPickAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // 0 addresses - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - addr := book.PickAddress(50) - assert.Nil(t, addr, "expected no address") - - randAddrs := randNetAddressPairs(t, 1) - addrSrc := randAddrs[0] - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - - // pick an address when we only have new address - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(100) - assert.NotNil(t, addr, "expected an address") - - // pick an address when we only have old address - book.MarkGood(addrSrc.addr.ID) - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - - // in this case, nNew==0 but we biased 100% to new, so we return nil - addr = book.PickAddress(100) - assert.Nil(t, addr, "did not expected an address") -} - -func TestAddrBookSaveLoad(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // 0 addresses - 
book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - book.Save() - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err := book.Start() - require.NoError(t, err) - - assert.True(t, book.Empty()) - - // 100 addresses - randAddrs := randNetAddressPairs(t, 100) - - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - assert.Equal(t, 100, book.Size()) - book.Save() - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err = book.Start() - require.NoError(t, err) - - assert.Equal(t, 100, book.Size()) -} - -func TestAddrBookLookup(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - addr := addrSrc.addr - src := addrSrc.src - err := book.AddAddress(addr, src) - require.NoError(t, err) - - ka := book.HasAddress(addr) - assert.True(t, ka, "Expected to find KnownAddress %v but wasn't there.", addr) - } -} - -func TestAddrBookPromoteToOld(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // Attempt all addresses. - for _, addrSrc := range randAddrs { - book.MarkAttempt(addrSrc.addr) - } - - // Promote half of them - for i, addrSrc := range randAddrs { - if i%2 == 0 { - book.MarkGood(addrSrc.addr.ID) - } - } - - // TODO: do more testing :) - - selection := book.GetSelection() - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection could not be bigger than the book") - } - - selection = book.GetSelectionWithBias(30) - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection with bias could not be bigger than the book") - } - - assert.Equal(t, book.Size(), 100, "expecting book size to be 100") -} - -func TestAddrBookHandlesDuplicates(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - - book.SetLogger(log.TestingLogger()) - - randAddrs := randNetAddressPairs(t, 100) - - differentSrc := randIPv4Address(t) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - err = book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate - require.NoError(t, err) - err = book.AddAddress(addrSrc.addr, differentSrc) // different src - require.NoError(t, err) - } - - assert.Equal(t, 100, book.Size()) -} - -type netAddressPair struct { - addr *p2p.NetAddress - src *p2p.NetAddress -} - -func randNetAddressPairs(t *testing.T, n int) []netAddressPair { - randAddrs := make([]netAddressPair, n) - for i := 0; i < n; i++ { - randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)} - } - return randAddrs -} - -func randIPv4Address(t *testing.T) *p2p.NetAddress { - for { - ip := fmt.Sprintf("%v.%v.%v.%v", - mrand.Intn(254)+1, - mrand.Intn(255), - mrand.Intn(255), - mrand.Intn(255), - ) - port := mrand.Intn(65535-1) + 1 - id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength))) - idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port)) - addr, err := types.NewNetAddressString(idAddr) - assert.Nil(t, err, "error generating rand network address") - 
if addr.Routable() { - return addr - } - } -} - -func TestAddrBookRemoveAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - assert.Equal(t, 1, book.Size()) - - book.RemoveAddress(addr) - assert.Equal(t, 0, book.Size()) - - nonExistingAddr := randIPv4Address(t) - book.RemoveAddress(nonExistingAddr) - assert.Equal(t, 0, book.Size()) -} - -func TestAddrBookGetSelectionWithOneMarkedGood(t *testing.T) { - // create a book with 10 addresses, 1 good/old and 9 new - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 1, 9) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs) - assertMOldAndNNewAddrsInSelection(t, 1, 9, addrs, book) -} - -func TestAddrBookGetSelectionWithOneNotMarkedGood(t *testing.T) { - // create a book with 10 addresses, 9 good/old and 1 new - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 9, 1) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs) - assertMOldAndNNewAddrsInSelection(t, 9, 1, addrs, book) -} - -func TestAddrBookGetSelectionReturnsNilWhenAddrBookIsEmpty(t *testing.T) { - book, _ := createAddrBookWithMOldAndNNewAddrs(t, 0, 0) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.Nil(t, addrs) -} - -func TestAddrBookGetSelection(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - assert.Empty(t, book.GetSelection()) - - // 2) add one address - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - assert.Equal(t, 1, len(book.GetSelection())) - assert.Equal(t, addr, book.GetSelection()[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection := book.GetSelection() - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Errorf("selection %v could not be bigger than the book", selection) - } -} - -func TestAddrBookGetSelectionWithBias(t *testing.T) { - const biasTowardsNewAddrs = 30 - - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - selection := book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Empty(t, selection) - - // 2) add one address - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Equal(t, 1, len(selection)) - assert.Equal(t, addr, selection[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - err := book.AddAddress(addrSrc.addr, addrSrc.src) - require.NoError(t, err) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - 
addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Fatalf("selection %v could not be bigger than the book", selection) - } - - // 4) mark 80% of the addresses as good - randAddrsLen := len(randAddrs) - for i, addrSrc := range randAddrs { - if int((float64(i)/float64(randAddrsLen))*100) >= 20 { - book.MarkGood(addrSrc.addr.ID) - } - } - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - - // check that ~70% of addresses returned are good - good := 0 - for _, addr := range selection { - if book.IsGood(addr) { - good++ - } - } - - got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs - - // compute some slack to protect against small differences due to rounding: - slack := int(math.Round(float64(100) / float64(len(selection)))) - if got > expected+slack { - t.Fatalf( - "got more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", - got, - expected, - good, - len(selection), - ) - } - if got < expected-slack { - t.Fatalf( - "got fewer good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", - got, - expected, - good, - len(selection), - ) - } -} - -func TestAddrBookHasAddress(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - addr := randIPv4Address(t) - err := book.AddAddress(addr, addr) - require.NoError(t, err) - - assert.True(t, book.HasAddress(addr)) - - book.RemoveAddress(addr) - - assert.False(t, book.HasAddress(addr)) -} - -func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []string) { - t.Helper() - addrs := make([]*p2p.NetAddress, numAddrs) - for i := 0; i < numAddrs; i++ { - addrs[i] = randIPv4Address(t) - } - - private := make([]string, numAddrs) - for i, addr := range addrs { - private[i] = string(addr.ID) - } - return addrs, private -} - -func TestBanBadPeers(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - _ = book.AddAddress(addr, addr) - - book.MarkBad(addr, 1*time.Second) - // addr should not reachable - assert.False(t, book.HasAddress(addr)) - assert.True(t, book.IsBanned(addr)) - - err := book.AddAddress(addr, addr) - // book should not add address from the blacklist - assert.Error(t, err) - - time.Sleep(1 * time.Second) - book.ReinstateBadPeers() - // address should be reinstated in the new bucket - assert.EqualValues(t, 1, book.Size()) - assert.True(t, book.HasAddress(addr)) - assert.False(t, book.IsGood(addr)) -} - -func TestAddrBookEmpty(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - // Check that empty book is empty - require.True(t, book.Empty()) - // Check that book with our address is empty - book.AddOurAddress(randIPv4Address(t)) - require.True(t, book.Empty()) - // Check that book with private addrs is empty - _, privateIds := testCreatePrivateAddrs(t, 5) - book.AddPrivateIDs(privateIds) - require.True(t, book.Empty()) - - // Check that book with address is not empty - err := book.AddAddress(randIPv4Address(t), randIPv4Address(t)) - require.NoError(t, err) - require.False(t, book.Empty()) -} - -func TestPrivatePeers(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addrs, private := testCreatePrivateAddrs(t, 10) - 
book.AddPrivateIDs(private) - - // private addrs must not be added - for _, addr := range addrs { - err := book.AddAddress(addr, addr) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivate) - assert.True(t, ok) - } - } - - // addrs coming from private peers must not be added - err := book.AddAddress(randIPv4Address(t), addrs[0]) - if assert.Error(t, err) { - _, ok := err.(ErrAddrBookPrivateSrc) - assert.True(t, ok) - } -} - -func testAddrBookAddressSelection(t *testing.T, bookSize int) { - // generate all combinations of old (m) and new addresses - for nBookOld := 0; nBookOld <= bookSize; nBookOld++ { - nBookNew := bookSize - nBookOld - dbgStr := fmt.Sprintf("book of size %d (new %d, old %d)", bookSize, nBookNew, nBookOld) - - // create book and get selection - book, _ := createAddrBookWithMOldAndNNewAddrs(t, nBookOld, nBookNew) - addrs := book.GetSelectionWithBias(biasToSelectNewPeers) - assert.NotNil(t, addrs, "%s - expected a non-nil selection", dbgStr) - nAddrs := len(addrs) - assert.NotZero(t, nAddrs, "%s - expected at least one address in selection", dbgStr) - - // check there's no nil addresses - for _, addr := range addrs { - if addr == nil { - t.Fatalf("%s - got nil address in selection %v", dbgStr, addrs) - } - } - - // XXX: shadowing - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - - // Given: - // n - num new addrs, m - num old addrs - // k - num new addrs expected in the beginning (based on bias %) - // i=min(n, max(k,r-m)), aka expNew - // j=min(m, r-i), aka expOld - // - // We expect this layout: - // indices: 0...i-1 i...i+j-1 - // addresses: N0..Ni-1 O0..Oj-1 - // - // There is at least one partition and at most three. - var ( - k = percentageOfNum(biasToSelectNewPeers, nAddrs) - expNew = tmmath.MinInt(nNew, tmmath.MaxInt(k, nAddrs-nBookOld)) - expOld = tmmath.MinInt(nOld, nAddrs-expNew) - ) - - // Verify that the number of old and new addresses are as expected - if nNew != expNew { - t.Fatalf("%s - expected new addrs %d, got %d", dbgStr, expNew, nNew) - } - if nOld != expOld { - t.Fatalf("%s - expected old addrs %d, got %d", dbgStr, expOld, nOld) - } - - // Verify that the order of addresses is as expected - // Get the sequence types and lengths of the selection - seqLens, seqTypes, err := analyseSelectionLayout(book, addrs) - assert.NoError(t, err, "%s", dbgStr) - - // Build a list with the expected lengths of partitions and another with the expected types, e.g.: - // expSeqLens = [10, 22], expSeqTypes = [1, 2] - // means we expect 10 new (type 1) addresses followed by 22 old (type 2) addresses. 
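Plugging concrete numbers into the i/j formulas above, for a book with 9 old and 1 new address, and assuming biasToSelectNewPeers is 30 (its definition is not part of this diff):

```go
package main

import (
	"fmt"
	"math"
)

func minInt(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func maxInt(a, b int) int {
	if a > b {
		return a
	}
	return b
}

func main() {
	const (
		r    = 10 // selection size for a book of 10
		m    = 9  // old addresses in the book
		n    = 1  // new addresses in the book
		bias = 30 // assumed biasToSelectNewPeers
	)
	k := int(math.Round(float64(bias) / 100 * float64(r))) // 3
	expNew := minInt(n, maxInt(k, r-m))                    // min(1, max(3, 1)) = 1
	expOld := minInt(m, r-expNew)                          // min(9, 9) = 9
	fmt.Println(expNew, expOld)                            // 1 9, matching the 9-old/1-new test above
}
```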
- var expSeqLens []int - var expSeqTypes []int - - switch { - case expOld == 0: // all new addresses - expSeqLens = []int{nAddrs} - expSeqTypes = []int{1} - case expNew == 0: // all old addresses - expSeqLens = []int{nAddrs} - expSeqTypes = []int{2} - case nAddrs-expNew-expOld == 0: // new addresses, old addresses - expSeqLens = []int{expNew, expOld} - expSeqTypes = []int{1, 2} - } - - assert.Equal(t, expSeqLens, seqLens, - "%s - expected sequence lengths of old/new %v, got %v", - dbgStr, expSeqLens, seqLens) - assert.Equal(t, expSeqTypes, seqTypes, - "%s - expected sequence types (1-new, 2-old) was %v, got %v", - dbgStr, expSeqTypes, seqTypes) - } -} - -func TestMultipleAddrBookAddressSelection(t *testing.T) { - // test books with smaller size, < N - const N = 32 - for bookSize := 1; bookSize < N; bookSize++ { - testAddrBookAddressSelection(t, bookSize) - } - - // Test for two books with sizes from following ranges - ranges := [...][]int{{33, 100}, {100, 175}} - bookSizes := make([]int, 0, len(ranges)) - for _, r := range ranges { - bookSizes = append(bookSizes, mrand.Intn(r[1]-r[0])+r[0]) - } - t.Logf("Testing address selection for the following book sizes %v\n", bookSizes) - for _, bookSize := range bookSizes { - testAddrBookAddressSelection(t, bookSize) - } -} - -func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) { - fname := createTempFileName(t, "addrbook_test") - - // This test creates adds a peer to the address book and marks it good - // It then attempts to override the peer's IP, by adding a peer with the same ID - // but different IP. We distinguish the IP's by "RealIP" and "OverrideAttemptIP" - peerID := "678503e6c8f50db7279c7da3cb9b072aac4bc0d5" - peerRealIP := "1.1.1.1:26656" - peerOverrideAttemptIP := "2.2.2.2:26656" - SrcAddr := "b0dd378c3fbc4c156cd6d302a799f0d2e4227201@159.89.121.174:26656" - - // There is a chance that AddAddress will ignore the new peer its given. - // So we repeat trying to override the peer several times, - // to ensure we aren't in a case that got probabilistically ignored - numOverrideAttempts := 10 - - peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP) - require.Nil(t, err) - - peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP) - require.Nil(t, err) - - src, err := types.NewNetAddressString(SrcAddr) - require.Nil(t, err) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - err = book.AddAddress(peerRealAddr, src) - require.Nil(t, err) - book.MarkAttempt(peerRealAddr) - book.MarkGood(peerRealAddr.ID) - - // Double check that adding a peer again doesn't error - err = book.AddAddress(peerRealAddr, src) - require.Nil(t, err) - - // Try changing ip but keeping the same node id. (change 1.1.1.1 to 2.2.2.2) - // This should just be ignored, and not error. - for i := 0; i < numOverrideAttempts; i++ { - err = book.AddAddress(peerOverrideAttemptAddr, src) - require.Nil(t, err) - } - // Now check that the IP was not overridden. - // This is done by sampling several peers from addr book - // and ensuring they all have the correct IP. - // In the expected functionality, this test should only have 1 Peer, hence will pass. - for i := 0; i < numOverrideAttempts; i++ { - selection := book.GetSelection() - for _, addr := range selection { - require.Equal(t, addr.IP, peerRealAddr.IP) - } - } -} - -func TestAddrBookGroupKey(t *testing.T) { - // non-strict routability - testCases := []struct { - name string - ip string - expKey string - }{ - // IPv4 normal. 
- {"ipv4 normal class a", "12.1.2.3", "12.1.0.0"}, - {"ipv4 normal class b", "173.1.2.3", "173.1.0.0"}, - {"ipv4 normal class c", "196.1.2.3", "196.1.0.0"}, - - // IPv6/IPv4 translations. - {"ipv6 rfc3964 with ipv4 encap", "2002:0c01:0203::", "12.1.0.0"}, - {"ipv6 rfc4380 toredo ipv4", "2001:0:1234::f3fe:fdfc", "12.1.0.0"}, - {"ipv6 rfc6052 well-known prefix with ipv4", "64:ff9b::0c01:0203", "12.1.0.0"}, - {"ipv6 rfc6145 translated ipv4", "::ffff:0:0c01:0203", "12.1.0.0"}, - - // Tor. - {"ipv6 tor onioncat", "fd87:d87e:eb43:1234::5678", "tor:2"}, - {"ipv6 tor onioncat 2", "fd87:d87e:eb43:1245::6789", "tor:2"}, - {"ipv6 tor onioncat 3", "fd87:d87e:eb43:1345::6789", "tor:3"}, - - // IPv6 normal. - {"ipv6 normal", "2602:100::1", "2602:100::"}, - {"ipv6 normal 2", "2602:0100::1234", "2602:100::"}, - {"ipv6 hurricane electric", "2001:470:1f10:a1::2", "2001:470:1000::"}, - {"ipv6 hurricane electric 2", "2001:0470:1f10:a1::2", "2001:470:1000::"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false) - assert.Equal(t, tc.expKey, key, "#%d", i) - } - - // strict routability - testCases = []struct { - name string - ip string - expKey string - }{ - // Local addresses. - {"ipv4 localhost", "127.0.0.1", "local"}, - {"ipv6 localhost", "::1", "local"}, - {"ipv4 zero", "0.0.0.0", "local"}, - {"ipv4 first octet zero", "0.1.2.3", "local"}, - - // Unroutable addresses. - {"ipv4 invalid bcast", "255.255.255.255", "unroutable"}, - {"ipv4 rfc1918 10/8", "10.1.2.3", "unroutable"}, - {"ipv4 rfc1918 172.16/12", "172.16.1.2", "unroutable"}, - {"ipv4 rfc1918 192.168/16", "192.168.1.2", "unroutable"}, - {"ipv6 rfc3849 2001:db8::/32", "2001:db8::1234", "unroutable"}, - {"ipv4 rfc3927 169.254/16", "169.254.1.2", "unroutable"}, - {"ipv6 rfc4193 fc00::/7", "fc00::1234", "unroutable"}, - {"ipv6 rfc4843 2001:10::/28", "2001:10::1234", "unroutable"}, - {"ipv6 rfc4862 fe80::/64", "fe80::1234", "unroutable"}, - } - - for i, tc := range testCases { - nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true) - assert.Equal(t, tc.expKey, key, "#%d", i) - } -} - -func assertMOldAndNNewAddrsInSelection(t *testing.T, m, n int, addrs []*p2p.NetAddress, book *addrBook) { - nOld, nNew := countOldAndNewAddrsInSelection(addrs, book) - assert.Equal(t, m, nOld, "old addresses") - assert.Equal(t, n, nNew, "new addresses") -} - -func createTempFileName(t *testing.T, prefix string) string { - t.Helper() - f, err := ioutil.TempFile("", prefix) - if err != nil { - panic(err) - } - - fname := f.Name() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - t.Cleanup(func() { _ = os.Remove(fname) }) - - return fname -} - -func createAddrBookWithMOldAndNNewAddrs(t *testing.T, nOld, nNew int) (book *addrBook, fname string) { - t.Helper() - fname = createTempFileName(t, "addrbook_test") - - book = NewAddrBook(fname, true).(*addrBook) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - randAddrs := randNetAddressPairs(t, nOld) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - book.MarkGood(addr.addr.ID) - } - - randAddrs = randNetAddressPairs(t, nNew) - for _, addr := range randAddrs { - err := book.AddAddress(addr.addr, addr.src) - require.NoError(t, err) - } - - return -} - -func countOldAndNewAddrsInSelection(addrs []*p2p.NetAddress, book *addrBook) (nOld, nNew int) { - for _, addr := range addrs { - if book.IsGood(addr) { - nOld++ - } else { - nNew++ - 
} - } - return -} - -// Analyze the layout of the selection specified by 'addrs' -// Returns: -// - seqLens - the lengths of the sequences of addresses of same type -// - seqTypes - the types of sequences in selection -func analyseSelectionLayout(book *addrBook, addrs []*p2p.NetAddress) (seqLens, seqTypes []int, err error) { - // address types are: 0 - nil, 1 - new, 2 - old - var ( - prevType = 0 - currentSeqLen = 0 - ) - - for _, addr := range addrs { - addrType := 0 - if book.IsGood(addr) { - addrType = 2 - } else { - addrType = 1 - } - if addrType != prevType && prevType != 0 { - seqLens = append(seqLens, currentSeqLen) - seqTypes = append(seqTypes, prevType) - currentSeqLen = 0 - } - currentSeqLen++ - prevType = addrType - } - - seqLens = append(seqLens, currentSeqLen) - seqTypes = append(seqTypes, prevType) - - return -} diff --git a/internal/p2p/pex/bench_test.go b/internal/p2p/pex/bench_test.go deleted file mode 100644 index 37019f60a..000000000 --- a/internal/p2p/pex/bench_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package pex - -import ( - "testing" - - "github.com/tendermint/tendermint/types" -) - -func BenchmarkAddrBook_hash(b *testing.B) { - book := &addrBook{ - ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), - filePath: "", - routabilityStrict: true, - } - book.init() - msg := []byte(`foobar`) - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, _ = book.hash(msg) - } -} diff --git a/internal/p2p/pex/doc.go b/internal/p2p/pex/doc.go index dc4f5d37a..70a5f6174 100644 --- a/internal/p2p/pex/doc.go +++ b/internal/p2p/pex/doc.go @@ -7,19 +7,14 @@ The PEX reactor is a continuous service which periodically requests addresses and serves addresses to other peers. There are two versions of this service aligning with the two p2p frameworks that Tendermint currently supports. -V1 is coupled with the Switch (which handles peer connections and routing of -messages) and, alongside exchanging peer information in the form of port/IP -pairs, also has the responsibility of dialing peers and ensuring that a -node has a sufficient amount of peers connected. - -V2 is embedded with the new p2p stack and uses the peer manager to advertise +The reactor is embedded with the new p2p stack and uses the peer manager to advertise peers as well as add new peers to the peer store. The V2 reactor passes a different set of proto messages which include a list of [urls](https://golang.org/pkg/net/url/#URL).These can be used to save a set of endpoints that each peer uses. The V2 reactor has backwards compatibility with V1. It can also handle V1 messages. -The V2 reactor is able to tweak the intensity of it's search by decreasing or +The reactor is able to tweak the intensity of its search by decreasing or increasing the interval between each request. It tracks connected peers via a linked list, sending a request to the node at the front of the list and adding it to the back of the list once a response is received.
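A minimal model of the polling schedule that doc comment describes, assuming a hypothetical sendRequest: the reactor takes the peer at the front of the list, sends it a PEX request, and re-enqueues it at the back, so every peer gets a turn before anyone repeats.

```go
package main

import (
	"container/list"
	"fmt"
)

func main() {
	peers := list.New()
	for _, id := range []string{"a", "b", "c"} {
		peers.PushBack(id)
	}

	sendRequest := func(id string) { fmt.Println("pex request ->", id) }

	for i := 0; i < 5; i++ {
		id := peers.Remove(peers.Front()).(string)
		sendRequest(id)
		peers.PushBack(id) // the real reactor re-enqueues on response
	}
	// Output order: a b c a b
}
```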
Using this method, a diff --git a/internal/p2p/pex/errors.go b/internal/p2p/pex/errors.go deleted file mode 100644 index 275e71bf9..000000000 --- a/internal/p2p/pex/errors.go +++ /dev/null @@ -1,89 +0,0 @@ -package pex - -import ( - "errors" - "fmt" - - "github.com/tendermint/tendermint/internal/p2p" -) - -type ErrAddrBookNonRoutable struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookNonRoutable) Error() string { - return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) -} - -type errAddrBookOldAddressNewBucket struct { - Addr *p2p.NetAddress - BucketID int -} - -func (err errAddrBookOldAddressNewBucket) Error() string { - return fmt.Sprintf("failed consistency check!"+ - " Cannot add pre-existing address %v into new bucket %v", - err.Addr, err.BucketID) -} - -type ErrAddrBookSelf struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookSelf) Error() string { - return fmt.Sprintf("Cannot add ourselves with address %v", err.Addr) -} - -type ErrAddrBookPrivate struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookPrivate) Error() string { - return fmt.Sprintf("Cannot add private peer with address %v", err.Addr) -} - -func (err ErrAddrBookPrivate) PrivateAddr() bool { - return true -} - -type ErrAddrBookPrivateSrc struct { - Src *p2p.NetAddress -} - -func (err ErrAddrBookPrivateSrc) Error() string { - return fmt.Sprintf("Cannot add peer coming from private peer with address %v", err.Src) -} - -func (err ErrAddrBookPrivateSrc) PrivateAddr() bool { - return true -} - -type ErrAddrBookNilAddr struct { - Addr *p2p.NetAddress - Src *p2p.NetAddress -} - -func (err ErrAddrBookNilAddr) Error() string { - return fmt.Sprintf("Cannot add a nil address. Got (addr, src) = (%v, %v)", err.Addr, err.Src) -} - -type ErrAddrBookInvalidAddr struct { - Addr *p2p.NetAddress - AddrErr error -} - -func (err ErrAddrBookInvalidAddr) Error() string { - return fmt.Sprintf("Cannot add invalid address %v: %v", err.Addr, err.AddrErr) -} - -// ErrAddressBanned is thrown when the address has been banned and therefore cannot be used -type ErrAddressBanned struct { - Addr *p2p.NetAddress -} - -func (err ErrAddressBanned) Error() string { - return fmt.Sprintf("Address: %v is currently banned", err.Addr) -} - -// ErrUnsolicitedList is thrown when a peer provides a list of addresses that have not been asked for. -var ErrUnsolicitedList = errors.New("unsolicited pexAddrsMessage") diff --git a/internal/p2p/pex/file.go b/internal/p2p/pex/file.go deleted file mode 100644 index ce65f7d4d..000000000 --- a/internal/p2p/pex/file.go +++ /dev/null @@ -1,83 +0,0 @@ -package pex - -import ( - "encoding/json" - "fmt" - "os" - - "github.com/tendermint/tendermint/internal/libs/tempfile" -) - -/* Loading & Saving */ - -type addrBookJSON struct { - Key string `json:"key"` - Addrs []*knownAddress `json:"addrs"` -} - -func (a *addrBook) saveToFile(filePath string) { - a.mtx.Lock() - defer a.mtx.Unlock() - - a.Logger.Info("Saving AddrBook to file", "size", a.size()) - - addrs := make([]*knownAddress, 0, len(a.addrLookup)) - for _, ka := range a.addrLookup { - addrs = append(addrs, ka) - } - aJSON := &addrBookJSON{ - Key: a.key, - Addrs: addrs, - } - - jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "err", err) - return - } - err = tempfile.WriteFileAtomic(filePath, jsonBytes, 0644) - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) - } -} - -// Returns false if file does not exist. 
-// cmn.Panics if file is corrupt. -func (a *addrBook) loadFromFile(filePath string) bool { - // If doesn't exist, do nothing. - _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return false - } - - // Load addrBookJSON{} - r, err := os.Open(filePath) - if err != nil { - panic(fmt.Sprintf("Error opening file %s: %v", filePath, err)) - } - defer r.Close() - aJSON := &addrBookJSON{} - dec := json.NewDecoder(r) - err = dec.Decode(aJSON) - if err != nil { - panic(fmt.Sprintf("Error reading file %s: %v", filePath, err)) - } - - // Restore all the fields... - // Restore the key - a.key = aJSON.Key - // Restore .bucketsNew & .bucketsOld - for _, ka := range aJSON.Addrs { - for _, bucketIndex := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIndex) - bucket[ka.Addr.String()] = ka - } - a.addrLookup[ka.ID()] = ka - if ka.BucketType == bucketTypeNew { - a.nNew++ - } else { - a.nOld++ - } - } - return true -} diff --git a/internal/p2p/pex/known_address.go b/internal/p2p/pex/known_address.go deleted file mode 100644 index 2a2ebe038..000000000 --- a/internal/p2p/pex/known_address.go +++ /dev/null @@ -1,141 +0,0 @@ -package pex - -import ( - "time" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -// knownAddress tracks information about a known network address -// that is used to determine how viable an address is. -type knownAddress struct { - Addr *p2p.NetAddress `json:"addr"` - Src *p2p.NetAddress `json:"src"` - Buckets []int `json:"buckets"` - Attempts int32 `json:"attempts"` - BucketType byte `json:"bucket_type"` - LastAttempt time.Time `json:"last_attempt"` - LastSuccess time.Time `json:"last_success"` - LastBanTime time.Time `json:"last_ban_time"` -} - -func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { - return &knownAddress{ - Addr: addr, - Src: src, - Attempts: 0, - LastAttempt: time.Now(), - BucketType: bucketTypeNew, - Buckets: nil, - } -} - -func (ka *knownAddress) ID() types.NodeID { - return ka.Addr.ID -} - -func (ka *knownAddress) isOld() bool { - return ka.BucketType == bucketTypeOld -} - -func (ka *knownAddress) isNew() bool { - return ka.BucketType == bucketTypeNew -} - -func (ka *knownAddress) markAttempt() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts++ -} - -func (ka *knownAddress) markGood() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts = 0 - ka.LastSuccess = now -} - -func (ka *knownAddress) ban(banTime time.Duration) { - if ka.LastBanTime.Before(time.Now().Add(banTime)) { - ka.LastBanTime = time.Now().Add(banTime) - } -} - -func (ka *knownAddress) isBanned() bool { - return ka.LastBanTime.After(time.Now()) -} - -func (ka *knownAddress) addBucketRef(bucketIdx int) int { - for _, bucket := range ka.Buckets { - if bucket == bucketIdx { - // TODO refactor to return error? - // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) - return -1 - } - } - ka.Buckets = append(ka.Buckets, bucketIdx) - return len(ka.Buckets) -} - -func (ka *knownAddress) removeBucketRef(bucketIdx int) int { - buckets := []int{} - for _, bucket := range ka.Buckets { - if bucket != bucketIdx { - buckets = append(buckets, bucket) - } - } - if len(buckets) != len(ka.Buckets)-1 { - // TODO refactor to return error? 
- // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) - return -1 - } - ka.Buckets = buckets - return len(ka.Buckets) -} - -/* - An address is bad if the address in question is a New address, has not been tried in the last - minute, and meets one of the following criteria: - - 1) It claims to be from the future - 2) It hasn't been seen in over a week - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. - -*/ -func (ka *knownAddress) isBad() bool { - // Is Old --> good - if ka.BucketType == bucketTypeOld { - return false - } - - // Has been attempted in the last minute --> good - if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) { - return false - } - - // TODO: From the future? - - // Too old? - // TODO: should be a timestamp of last seen, not just last attempt - if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { - return true - } - - // Never succeeded? - if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { - return true - } - - // Hasn't succeeded in too long? - if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && - ka.Attempts >= maxFailures { - return true - } - - return false -} diff --git a/internal/p2p/pex/params.go b/internal/p2p/pex/params.go deleted file mode 100644 index 29b4d45ab..000000000 --- a/internal/p2p/pex/params.go +++ /dev/null @@ -1,55 +0,0 @@ -package pex - -import "time" - -const ( - // addresses under which the address manager will claim to need more addresses. - needAddressThreshold = 1000 - - // interval used to dump the address cache to disk for future use. - dumpAddressInterval = time.Minute * 2 - - // max addresses in each old address bucket. - oldBucketSize = 64 - - // buckets we split old addresses over. - oldBucketCount = 64 - - // max addresses in each new address bucket. - newBucketSize = 64 - - // buckets that we spread new addresses over. - newBucketCount = 256 - - // old buckets over which an address group will be spread. - oldBucketsPerGroup = 4 - - // new buckets over which a source address group will be spread. - newBucketsPerGroup = 32 - - // buckets a frequently seen new address may end up in. - maxNewBucketsPerAddress = 4 - - // days before which we assume an address has vanished - // if we have not seen it announced in that long. - numMissingDays = 7 - - // tries without a single success before we assume an address is bad. - numRetries = 3 - - // max failures we will accept without a success before considering an address bad. - maxFailures = 10 // ? - - // days since the last success before we will consider evicting an address. - minBadDays = 7 - - // % of total addresses known returned by GetSelection. - getSelectionPercent = 23 - - // min addresses that must be returned by GetSelection. Useful for bootstrapping. 
- minGetSelection = 32 - - // max addresses returned by GetSelection - // NOTE: this must match "maxMsgSize" - maxGetSelection = 250 -) diff --git a/internal/p2p/pex/pex_reactor.go b/internal/p2p/pex/pex_reactor.go deleted file mode 100644 index 049dbd9f1..000000000 --- a/internal/p2p/pex/pex_reactor.go +++ /dev/null @@ -1,863 +0,0 @@ -package pex - -import ( - "errors" - "fmt" - "net" - "sync" - "time" - - "github.com/gogo/protobuf/proto" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/cmap" - tmmath "github.com/tendermint/tendermint/libs/math" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/libs/service" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -type Peer = p2p.Peer - -const ( - // PexChannel is a channel for PEX messages - PexChannel = byte(0x00) - - // over-estimate of max NetAddress size - // hexID (40) + IP (16) + Port (2) + Name (100) ... - // NOTE: dont use massive DNS name .. - maxAddressSize = 256 - - // NOTE: amplificaiton factor! - // small request results in up to maxMsgSize response - maxMsgSize = maxAddressSize * maxGetSelection - - // ensure we have enough peers - defaultEnsurePeersPeriod = 30 * time.Second - - // Seed/Crawler constants - - // minTimeBetweenCrawls is a minimum time between attempts to crawl a peer. - minTimeBetweenCrawls = 2 * time.Minute - - // check some peers every this - crawlPeerPeriod = 30 * time.Second - - maxAttemptsToDial = 16 // ~ 35h in total (last attempt - 18h) - - // if node connects to seed, it does not have any trusted peers. - // Especially in the beginning, node should have more trusted peers than - // untrusted. - biasToSelectNewPeers = 30 // 70 to select good peers - - // if a peer is marked bad, it will be banned for at least this time period - defaultBanTime = 24 * time.Hour -) - -type errMaxAttemptsToDial struct { -} - -func (e errMaxAttemptsToDial) Error() string { - return fmt.Sprintf("reached max attempts %d to dial", maxAttemptsToDial) -} - -type errTooEarlyToDial struct { - backoffDuration time.Duration - lastDialed time.Time -} - -func (e errTooEarlyToDial) Error() string { - return fmt.Sprintf( - "too early to dial (backoff duration: %d, last dialed: %v, time since: %v)", - e.backoffDuration, e.lastDialed, time.Since(e.lastDialed)) -} - -// Reactor handles PEX (peer exchange) and ensures that an -// adequate number of peers are connected to the switch. -// -// It uses `AddrBook` (address book) to store `NetAddress`es of the peers. -// -// ## Preventing abuse -// -// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg too. -// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod. 
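// A minimal sketch of that rate-limit rule (hypothetical names, not reactor
// code; the real check is receiveRequest further down, which uses
// ensurePeersPeriod/3 as the minimum interval between requests from a peer):
package sketch

import "time"

type requestGate struct {
	minInterval time.Duration
	last        map[string]time.Time // peer ID -> time of last request
}

func newRequestGate(ensurePeersPeriod time.Duration) *requestGate {
	return &requestGate{
		minInterval: ensurePeersPeriod / 3,
		last:        make(map[string]time.Time),
	}
}

// allow reports whether a request from peerID respects the minimum interval;
// a caller would disconnect (and possibly ban) peers that violate it.
func (g *requestGate) allow(peerID string, now time.Time) bool {
	if prev, ok := g.last[peerID]; ok && now.Sub(prev) < g.minInterval {
		return false
	}
	g.last[peerID] = now
	return true
}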
-type Reactor struct { - p2p.BaseReactor - - book AddrBook - config *ReactorConfig - ensurePeersPeriod time.Duration // TODO: should go in the config - - // maps to prevent abuse - requestsSent *cmap.CMap // ID->struct{}: unanswered send requests - lastReceivedRequests *cmap.CMap // ID->time.Time: last time peer requested from us - - seedAddrs []*p2p.NetAddress - - attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} - - // seed/crawled mode fields - crawlPeerInfos map[types.NodeID]crawlPeerInfo -} - -func (r *Reactor) minReceiveRequestInterval() time.Duration { - // NOTE: must be less than ensurePeersPeriod, otherwise we'll request - // peers too quickly from others and they'll think we're bad! - return r.ensurePeersPeriod / 3 -} - -// ReactorConfig holds reactor specific configuration data. -type ReactorConfig struct { - // Seed/Crawler mode - SeedMode bool - - // We want seeds to only advertise good peers. Therefore they should wait at - // least as long as we expect it to take for a peer to become good before - // disconnecting. - SeedDisconnectWaitPeriod time.Duration - - // Maximum pause when redialing a persistent peer (if zero, exponential backoff is used) - PersistentPeersMaxDialPeriod time.Duration - - // Seeds is a list of addresses reactor may use - // if it can't connect to peers in the addrbook. - Seeds []string -} - -type _attemptsToDial struct { - number int - lastDialed time.Time -} - -// NewReactor creates new PEX reactor. -func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { - r := &Reactor{ - book: b, - config: config, - ensurePeersPeriod: defaultEnsurePeersPeriod, - requestsSent: cmap.NewCMap(), - lastReceivedRequests: cmap.NewCMap(), - crawlPeerInfos: make(map[types.NodeID]crawlPeerInfo), - } - r.BaseReactor = *p2p.NewBaseReactor("PEX", r) - return r -} - -// OnStart implements BaseService -func (r *Reactor) OnStart() error { - err := r.book.Start() - if err != nil && err != service.ErrAlreadyStarted { - return err - } - - numOnline, seedAddrs, err := r.checkSeeds() - if err != nil { - return err - } else if numOnline == 0 && r.book.Empty() { - return errors.New("address book is empty and couldn't resolve any seed nodes") - } - - r.seedAddrs = seedAddrs - - // Check if this node should run - // in seed/crawler mode - if r.config.SeedMode { - go r.crawlPeersRoutine() - } else { - go r.ensurePeersRoutine() - } - return nil -} - -// OnStop implements BaseService -func (r *Reactor) OnStop() { - if err := r.book.Stop(); err != nil { - r.Logger.Error("Error stopping address book", "err", err) - } -} - -// GetChannels implements Reactor -func (r *Reactor) GetChannels() []*conn.ChannelDescriptor { - return []*conn.ChannelDescriptor{ - { - ID: PexChannel, - Priority: 1, - SendQueueCapacity: 10, - RecvMessageCapacity: maxMsgSize, - - MaxSendBytes: 200, - }, - } -} - -// AddPeer implements Reactor by adding peer to the address book (if inbound) -// or by requesting more addresses (if outbound). -func (r *Reactor) AddPeer(p Peer) { - if p.IsOutbound() { - // For outbound peers, the address is already in the books - - // either via DialPeersAsync or r.Receive. - // Ask it for more peers if we need. - if r.book.NeedMoreAddrs() { - r.RequestAddrs(p) - } - } else { - // inbound peer is its own source - addr, err := p.NodeInfo().NetAddress() - if err != nil { - r.Logger.Error("Failed to get peer NetAddress", "err", err, "peer", p) - return - } - - // Make it explicit that addr and src are the same for an inbound peer. 
- src := addr - - // add to book. dont RequestAddrs right away because - // we don't trust inbound as much - let ensurePeersRoutine handle it. - err = r.book.AddAddress(addr, src) - r.logErrAddrBook(err) - } -} - -// RemovePeer implements Reactor by resetting peer's requests info. -func (r *Reactor) RemovePeer(p Peer, reason interface{}) { - id := string(p.ID()) - r.requestsSent.Delete(id) - r.lastReceivedRequests.Delete(id) -} - -func (r *Reactor) logErrAddrBook(err error) { - if err != nil { - switch err.(type) { - case ErrAddrBookNilAddr: - r.Logger.Error("Failed to add new address", "err", err) - default: - // non-routable, self, full book, private, etc. - r.Logger.Debug("Failed to add new address", "err", err) - } - } -} - -// Receive implements Reactor by handling incoming PEX messages. -// XXX: do not call any methods that can block or incur heavy processing. -// https://github.com/tendermint/tendermint/issues/2888 -func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { - msg, err := decodeMsg(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "err", err) - r.Switch.StopPeerForError(src, err) - return - } - r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg) - - switch msg := msg.(type) { - case *tmp2p.PexRequest: - - // NOTE: this is a prime candidate for amplification attacks, - // so it's important we - // 1) restrict how frequently peers can request - // 2) limit the output size - - // If we're a seed and this is an inbound peer, - // respond once and disconnect. - if r.config.SeedMode && !src.IsOutbound() { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v != nil { - // FlushStop/StopPeer are already - // running in a go-routine. - return - } - r.lastReceivedRequests.Set(id, time.Now()) - - // Send addrs and disconnect - r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) - go func() { - // In a go-routine so it doesn't block .Receive. - src.FlushStop() - r.Switch.StopPeerGracefully(src) - }() - - } else { - // Check we're not receiving requests too frequently. - if err := r.receiveRequest(src); err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - return - } - r.SendAddrs(src, r.book.GetSelection()) - } - - case *tmp2p.PexResponse: - // If we asked for addresses, add them to the book - addrs, err := NetAddressesFromProto(msg.Addresses) - if err != nil { - r.Switch.StopPeerForError(src, err) - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - return - } - err = r.ReceiveAddrs(addrs, src) - if err != nil { - r.Switch.StopPeerForError(src, err) - if err == ErrUnsolicitedList { - r.book.MarkBad(src.SocketAddr(), defaultBanTime) - } - return - } - - default: - r.Logger.Error(fmt.Sprintf("Unknown message type %T", msg)) - } -} - -// enforces a minimum amount of time between requests -func (r *Reactor) receiveRequest(src Peer) error { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v == nil { - // initialize with empty time - lastReceived := time.Time{} - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - lastReceived := v.(time.Time) - if lastReceived.Equal(time.Time{}) { - // first time gets a free pass. 
then we start tracking the time - lastReceived = time.Now() - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - now := time.Now() - minInterval := r.minReceiveRequestInterval() - if now.Sub(lastReceived) < minInterval { - return fmt.Errorf( - "peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", - src.ID(), - lastReceived, - now, - minInterval, - ) - } - r.lastReceivedRequests.Set(id, now) - return nil -} - -// RequestAddrs asks peer for more addresses if we do not already have a -// request out for this peer. -func (r *Reactor) RequestAddrs(p Peer) { - id := string(p.ID()) - if r.requestsSent.Has(id) { - return - } - r.Logger.Debug("Request addrs", "from", p) - r.requestsSent.Set(id, struct{}{}) - p.Send(PexChannel, mustEncode(&tmp2p.PexRequest{})) -} - -// ReceiveAddrs adds the given addrs to the addrbook if theres an open -// request for this peer and deletes the open request. -// If there's no open request for the src peer, it returns an error. -func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { - id := string(src.ID()) - if !r.requestsSent.Has(id) { - return ErrUnsolicitedList - } - r.requestsSent.Delete(id) - - srcAddr, err := src.NodeInfo().NetAddress() - if err != nil { - return err - } - - srcIsSeed := false - for _, seedAddr := range r.seedAddrs { - if seedAddr.Equals(srcAddr) { - srcIsSeed = true - break - } - } - - for _, netAddr := range addrs { - // NOTE: we check netAddr validity and routability in book#AddAddress. - err = r.book.AddAddress(netAddr, srcAddr) - if err != nil { - r.logErrAddrBook(err) - // XXX: should we be strict about incoming data and disconnect from a - // peer here too? - continue - } - - // If this address came from a seed node, try to connect to it without - // waiting (#2093) - if srcIsSeed { - r.Logger.Info("Will dial address, which came from seed", "addr", netAddr, "seed", srcAddr) - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - } - }(netAddr) - } - } - - return nil -} - -// SendAddrs sends addrs to the peer. -func (r *Reactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) { - p.Send(PexChannel, mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto(netAddrs)})) -} - -// SetEnsurePeersPeriod sets period to ensure peers connected. -func (r *Reactor) SetEnsurePeersPeriod(d time.Duration) { - r.ensurePeersPeriod = d -} - -// Ensures that sufficient peers are connected. (continuous) -func (r *Reactor) ensurePeersRoutine() { - var ( - seed = tmrand.NewRand() - jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds()) - ) - - // Randomize first round of communication to avoid thundering herd. - // If no peers are present directly start connecting so we guarantee swift - // setup with the help of configured seeds. - if r.nodeHasSomePeersOrDialingAny() { - time.Sleep(time.Duration(jitter)) - } - - // fire once immediately. - // ensures we dial the seeds right away if the book is empty - r.ensurePeers() - - // fire periodically - ticker := time.NewTicker(r.ensurePeersPeriod) - for { - select { - case <-ticker.C: - r.ensurePeers() - case <-r.Quit(): - ticker.Stop() - return - } - } -} - -// ensurePeers ensures that sufficient peers are connected. 
(once) -// -// heuristic that we haven't perfected yet, or, perhaps is manually edited by -// the node operator. It should not be used to compute what addresses are -// already connected or not. -func (r *Reactor) ensurePeers() { - var ( - out, in, dial = r.Switch.NumPeers() - numToDial = r.Switch.MaxNumOutboundPeers() - (out + dial) - ) - r.Logger.Info( - "Ensure peers", - "numOutPeers", out, - "numInPeers", in, - "numDialing", dial, - "numToDial", numToDial, - ) - - if numToDial <= 0 { - return - } - - // bias to prefer more vetted peers when we have fewer connections. - // not perfect, but somewhate ensures that we prioritize connecting to more-vetted - // NOTE: range here is [10, 90]. Too high ? - newBias := tmmath.MinInt(out, 8)*10 + 10 - - toDial := make(map[types.NodeID]*p2p.NetAddress) - // Try maxAttempts times to pick numToDial addresses to dial - maxAttempts := numToDial * 3 - - for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ { - try := r.book.PickAddress(newBias) - if try == nil { - continue - } - if _, selected := toDial[try.ID]; selected { - continue - } - if r.Switch.IsDialingOrExistingAddress(try) { - continue - } - // TODO: consider moving some checks from toDial into here - // so we don't even consider dialing peers that we want to wait - // before dialing again, or have dialed too many times already - r.Logger.Info("Will dial address", "addr", try) - toDial[try.ID] = try - } - - // Dial picked addresses - for _, addr := range toDial { - go func(addr *p2p.NetAddress) { - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - } - }(addr) - } - - if r.book.NeedMoreAddrs() { - // Check if banned nodes can be reinstated - r.book.ReinstateBadPeers() - } - - if r.book.NeedMoreAddrs() { - - // 1) Pick a random peer and ask for more. - peers := r.Switch.Peers().List() - peersCount := len(peers) - if peersCount > 0 { - rand := tmrand.NewRand() - peer := peers[rand.Int()%peersCount] - r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer) - r.RequestAddrs(peer) - } - - // 2) Dial seeds if we are not dialing anyone. - // This is done in addition to asking a peer for addresses to work-around - // peers not participating in PEX. - if len(toDial) == 0 { - r.Logger.Info("No addresses to dial. 
Falling back to seeds") - r.dialSeeds() - } - } -} - -func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) { - _attempts, ok := r.attemptsToDial.Load(addr.DialString()) - if !ok { - return - } - atd := _attempts.(_attemptsToDial) - return atd.number, atd.lastDialed -} - -func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { - attempts, lastDialed := r.dialAttemptsInfo(addr) - if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial { - r.book.MarkBad(addr, defaultBanTime) - return errMaxAttemptsToDial{} - } - - // exponential backoff if it's not our first attempt to dial given address - if attempts > 0 { - rand := tmrand.NewRand() - jitter := time.Duration(rand.Float64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitter + ((1 << uint(attempts)) * time.Second) - backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration) - sinceLastDialed := time.Since(lastDialed) - if sinceLastDialed < backoffDuration { - return errTooEarlyToDial{backoffDuration, lastDialed} - } - } - - err := r.Switch.DialPeerWithAddress(addr) - if err != nil { - if _, ok := err.(p2p.ErrCurrentlyDialingOrExistingAddress); ok { - return err - } - - markAddrInBookBasedOnErr(addr, r.book, err) - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - // NOTE: addr is removed from addrbook in markAddrInBookBasedOnErr - r.attemptsToDial.Delete(addr.DialString()) - default: - r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) - } - return fmt.Errorf("dialing failed (attempts: %d): %w", attempts+1, err) - } - - // cleanup any history - r.attemptsToDial.Delete(addr.DialString()) - return nil -} - -// maxBackoffDurationForPeer caps the backoff duration for persistent peers. -func (r *Reactor) maxBackoffDurationForPeer(addr *p2p.NetAddress, planned time.Duration) time.Duration { - if r.config.PersistentPeersMaxDialPeriod > 0 && - planned > r.config.PersistentPeersMaxDialPeriod && - r.Switch.IsPeerPersistent(addr) { - return r.config.PersistentPeersMaxDialPeriod - } - return planned -} - -// checkSeeds checks that addresses are well formed. -// Returns number of seeds we can connect to, along with all seeds addrs. -// return err if user provided any badly formatted seed addresses. -// Doesn't error if the seed node can't be reached. -// numOnline returns -1 if no seed nodes were in the initial configuration. 
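// Before checkSeeds below, an illustration of the exponential backoff used by
// dialPeer above (a standalone sketch, not reactor code): each retry waits a
// random jitter plus 2^attempts seconds, capped for persistent peers by
// PersistentPeersMaxDialPeriod. At maxAttemptsToDial = 16 the final wait is
// roughly 18h, about 35h cumulatively, matching the constants earlier in this
// file.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func backoff(attempts int, maxForPeer time.Duration) time.Duration {
	jitter := time.Duration(rand.Float64() * float64(time.Second))
	d := jitter + (1<<uint(attempts))*time.Second
	if maxForPeer > 0 && d > maxForPeer {
		return maxForPeer // persistent peers are capped
	}
	return d
}

func main() {
	for a := 1; a <= 16; a++ {
		fmt.Printf("attempt %2d: wait ~%v\n", a, backoff(a, 0))
	}
}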
-func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err error) { - lSeeds := len(r.config.Seeds) - if lSeeds == 0 { - return -1, nil, nil - } - netAddrs, errs := p2p.NewNetAddressStrings(r.config.Seeds) - numOnline = lSeeds - len(errs) - for _, err := range errs { - switch e := err.(type) { - case types.ErrNetAddressLookup: - r.Logger.Error("Connecting to seed failed", "err", e) - default: - return 0, nil, fmt.Errorf("seed node configuration has error: %w", e) - } - } - return numOnline, netAddrs, nil -} - -// randomly dial seeds until we connect to one or exhaust them -func (r *Reactor) dialSeeds() { - rand := tmrand.NewRand() - perm := rand.Perm(len(r.seedAddrs)) - // perm := r.Switch.rng.Perm(lSeeds) - for _, i := range perm { - // dial a random seed - seedAddr := r.seedAddrs[i] - err := r.Switch.DialPeerWithAddress(seedAddr) - - switch err.(type) { - case nil, p2p.ErrCurrentlyDialingOrExistingAddress: - return - } - r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr) - } - // do not write error message if there were no seeds specified in config - if len(r.seedAddrs) > 0 { - r.Switch.Logger.Error("Couldn't connect to any seeds") - } -} - -// AttemptsToDial returns the number of attempts to dial specific address. It -// returns 0 if never attempted or successfully connected. -func (r *Reactor) AttemptsToDial(addr *p2p.NetAddress) int { - lAttempts, attempted := r.attemptsToDial.Load(addr.DialString()) - if attempted { - return lAttempts.(_attemptsToDial).number - } - return 0 -} - -//---------------------------------------------------------- - -// Explores the network searching for more peers. (continuous) -// Seed/Crawler Mode causes this node to quickly disconnect -// from peers, except other seed nodes. -func (r *Reactor) crawlPeersRoutine() { - // If we have any seed nodes, consult them first - if len(r.seedAddrs) > 0 { - r.dialSeeds() - } else { - // Do an initial crawl - r.crawlPeers(r.book.GetSelection()) - } - - // Fire periodically - ticker := time.NewTicker(crawlPeerPeriod) - - for { - select { - case <-ticker.C: - r.attemptDisconnects() - r.crawlPeers(r.book.GetSelection()) - r.cleanupCrawlPeerInfos() - case <-r.Quit(): - return - } - } -} - -// nodeHasSomePeersOrDialingAny returns true if the node is connected to some -// peers or dialing them currently. -func (r *Reactor) nodeHasSomePeersOrDialingAny() bool { - out, in, dial := r.Switch.NumPeers() - return out+in+dial > 0 -} - -// crawlPeerInfo handles temporary data needed for the network crawling -// performed during seed/crawler mode. -type crawlPeerInfo struct { - Addr *p2p.NetAddress `json:"addr"` - // The last time we crawled the peer or attempted to do so. - LastCrawled time.Time `json:"last_crawled"` -} - -// crawlPeers will crawl the network looking for new peer addresses. -func (r *Reactor) crawlPeers(addrs []*p2p.NetAddress) { - now := time.Now() - - for _, addr := range addrs { - peerInfo, ok := r.crawlPeerInfos[addr.ID] - - // Do not attempt to connect with peers we recently crawled. - if ok && now.Sub(peerInfo.LastCrawled) < minTimeBetweenCrawls { - continue - } - - // Record crawling attempt. 
- r.crawlPeerInfos[addr.ID] = crawlPeerInfo{ - Addr: addr, - LastCrawled: now, - } - - err := r.dialPeer(addr) - if err != nil { - switch err.(type) { - case errMaxAttemptsToDial, errTooEarlyToDial, p2p.ErrCurrentlyDialingOrExistingAddress: - r.Logger.Debug(err.Error(), "addr", addr) - default: - r.Logger.Error(err.Error(), "addr", addr) - } - continue - } - - peer := r.Switch.Peers().Get(addr.ID) - if peer != nil { - r.RequestAddrs(peer) - } - } -} - -func (r *Reactor) cleanupCrawlPeerInfos() { - for id, info := range r.crawlPeerInfos { - // If we did not crawl a peer for 24 hours, it means the peer was removed - // from the addrbook => remove - // - // 10000 addresses / maxGetSelection = 40 cycles to get all addresses in - // the ideal case, - // 40 * crawlPeerPeriod ~ 20 minutes - if time.Since(info.LastCrawled) > 24*time.Hour { - delete(r.crawlPeerInfos, id) - } - } -} - -// attemptDisconnects checks if we've been with each peer long enough to disconnect -func (r *Reactor) attemptDisconnects() { - for _, peer := range r.Switch.Peers().List() { - if peer.Status().Duration < r.config.SeedDisconnectWaitPeriod { - continue - } - if peer.IsPersistent() { - continue - } - r.Switch.StopPeerGracefully(peer) - } -} - -func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { - // TODO: detect more "bad peer" scenarios - switch err.(type) { - case p2p.ErrSwitchAuthenticationFailure: - book.MarkBad(addr, defaultBanTime) - default: - book.MarkAttempt(addr) - } -} - -//----------------------------------------------------------------------------- -// Messages - -// mustEncode proto encodes a tmp2p.Message -func mustEncode(pb proto.Message) []byte { - msg := tmp2p.PexMessage{} - switch pb := pb.(type) { - case *tmp2p.PexRequest: - msg.Sum = &tmp2p.PexMessage_PexRequest{PexRequest: pb} - case *tmp2p.PexResponse: - msg.Sum = &tmp2p.PexMessage_PexResponse{PexResponse: pb} - default: - panic(fmt.Sprintf("Unknown message type %T", pb)) - } - - bz, err := msg.Marshal() - if err != nil { - panic(fmt.Errorf("unable to marshal %T: %w", pb, err)) - } - return bz -} - -func decodeMsg(bz []byte) (proto.Message, error) { - pb := &tmp2p.PexMessage{} - - err := pb.Unmarshal(bz) - if err != nil { - return nil, err - } - - switch msg := pb.Sum.(type) { - case *tmp2p.PexMessage_PexRequest: - return msg.PexRequest, nil - case *tmp2p.PexMessage_PexResponse: - return msg.PexResponse, nil - default: - return nil, fmt.Errorf("unknown message: %T", msg) - } -} - -//----------------------------------------------------------------------------- -// address converters - -// NetAddressFromProto converts a Protobuf PexAddress into a native struct. -func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) { - ip := net.ParseIP(pb.IP) - if ip == nil { - return nil, fmt.Errorf("invalid IP address %v", pb.IP) - } - if pb.Port >= 1<<16 { - return nil, fmt.Errorf("invalid port number %v", pb.Port) - } - return &types.NetAddress{ - ID: types.NodeID(pb.ID), - IP: ip, - Port: uint16(pb.Port), - }, nil -} - -// NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice. -func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) { - nas := make([]*types.NetAddress, 0, len(pbs)) - for _, pb := range pbs { - na, err := NetAddressFromProto(pb) - if err != nil { - return nil, err - } - nas = append(nas, na) - } - return nas, nil -} - -// NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice. 
-func NetAddressesToProto(nas []*types.NetAddress) []tmp2p.PexAddress { - pbs := make([]tmp2p.PexAddress, 0, len(nas)) - for _, na := range nas { - if na != nil { - pbs = append(pbs, tmp2p.PexAddress{ - ID: string(na.ID), - IP: na.IP.String(), - Port: uint32(na.Port), - }) - } - } - return pbs -} diff --git a/internal/p2p/pex/pex_reactor_test.go b/internal/p2p/pex/pex_reactor_test.go deleted file mode 100644 index 56f24457f..000000000 --- a/internal/p2p/pex/pex_reactor_test.go +++ /dev/null @@ -1,680 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - "time" - - "github.com/gogo/protobuf/proto" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/mock" - "github.com/tendermint/tendermint/libs/log" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -func TestPEXReactorBasic(t *testing.T) { - r, _ := createReactor(t, &ReactorConfig{}) - - assert.NotNil(t, r) - assert.NotEmpty(t, r.GetChannels()) -} - -func TestPEXReactorAddRemovePeer(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - - size := book.Size() - peer := p2p.CreateRandomPeer(false) - - r.AddPeer(peer) - assert.Equal(t, size+1, book.Size()) - - r.RemovePeer(peer, "peer not available") - - outboundPeer := p2p.CreateRandomPeer(true) - - r.AddPeer(outboundPeer) - assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book") - - r.RemovePeer(outboundPeer, "peer not available") -} - -// --- FAIL: TestPEXReactorRunning (11.10s) -// pex_reactor_test.go:411: expected all switches to be connected to at -// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => -// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) -// -// EXPLANATION: peers are getting rejected because in switch#addPeer we check -// if any peer (who we already connected to) has the same IP. Even though local -// peers have different IP addresses, they all have the same underlying remote -// IP: 127.0.0.1. 
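// A minimal sketch of the duplicate-IP check behind the failure explained
// above (hypothetical names; the real check lives in the switch's addPeer
// path). This is why the test config sets AllowDuplicateIP = true: every
// local switch shares the remote IP 127.0.0.1.
package sketch

import "net"

type ipGuard struct {
	allowDuplicateIP bool
	connected        map[string]bool // remote IP -> already connected?
}

// accept reports whether a new connection from remote should be admitted.
func (g *ipGuard) accept(remote net.IP) bool {
	key := remote.String()
	if g.connected[key] && !g.allowDuplicateIP {
		return false
	}
	g.connected[key] = true
	return true
}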
-// -func TestPEXReactorRunning(t *testing.T) { - N := 3 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - r := NewReactor(books[i], &ReactorConfig{}) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NetAddress() - err := books[switchIndex].AddAddress(addr, addr) - require.NoError(t, err) - } - - addOtherNodeAddrToAddrBook(0, 1) - addOtherNodeAddrToAddrBook(1, 0) - addOtherNodeAddrToAddrBook(2, 1) - - assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) - - // stop them - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorReceive(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - peer := p2p.CreateRandomPeer(false) - - // we have to send a request to receive responses - r.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - r.Receive(PexChannel, peer, msg) - assert.Equal(t, size+1, book.Size()) - - msg = mustEncode(&tmp2p.PexRequest{}) - r.Receive(PexChannel, peer, msg) // should not panic. 
-} - -func TestPEXReactorRequestMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - peerAddr := peer.SocketAddr() - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - err := book.AddAddress(peerAddr, peerAddr) - require.NoError(t, err) - require.True(t, book.HasAddress(peerAddr)) - - id := string(peer.ID()) - msg := mustEncode(&tmp2p.PexRequest{}) - - // first time creates the entry - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // next time sets the last time value - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // third time is too many too soon - peer is removed - r.Receive(PexChannel, peer, msg) - assert.False(t, r.lastReceivedRequests.Has(id)) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peerAddr)) -} - -func TestPEXReactorAddrsMessageAbuse(t *testing.T) { - r, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - p2p.AddPeerToSwitchPeerSet(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - - id := string(peer.ID()) - - // request addrs from the peer - r.RequestAddrs(peer) - assert.True(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{peer.SocketAddr()})}) - - // receive some addrs. should clear the request - r.Receive(PexChannel, peer, msg) - assert.False(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // receiving more unsolicited addrs causes a disconnect and ban - r.Receive(PexChannel, peer, msg) - assert.False(t, sw.Peers().Has(peer.ID())) - assert.True(t, book.IsBanned(peer.SocketAddr())) -} - -func TestCheckSeeds(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. test creating peer with no seeds works - peerSwitch := testCreateDefaultPeer(dir, 0) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 2. create seed - seed := testCreateSeed(dir, 1, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - - // 3. test create peer with online seed works - peerSwitch = testCreatePeerWithSeed(dir, 2, seed) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 4. test create peer with all seeds having unresolvable DNS fails - badPeerConfig := &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657"}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Error(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests - - // 5. 
test create peer with one good seed address succeeds - badPeerConfig = &ReactorConfig{ - Seeds: []string{"ed3dfd27bfc4af18f67a49862f04cc100696e84d@bad.network.addr:26657", - "d824b13cb5d40fa1d8a614e089357c7eff31b670@anotherbad.network.addr:26657", - seed.NetAddress().String()}, - } - peerSwitch = testCreatePeerWithConfig(dir, 2, badPeerConfig) - require.Nil(t, peerSwitch.Start()) - peerSwitch.Stop() // nolint:errcheck // ignore for tests -} - -func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create seed - seed := testCreateSeed(dir, 0, []*p2p.NetAddress{}, []*p2p.NetAddress{}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 2. create usual peer with only seed configured. - peer := testCreatePeerWithSeed(dir, 1, seed) - require.Nil(t, peer.Start()) - t.Cleanup(func() { _ = peer.Stop() }) - - // 3. check that the peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) -} - -func TestConnectionSpeedForPeerReceivedFromSeed(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - // 1. create peer - peerSwitch := testCreateDefaultPeer(dir, 1) - require.Nil(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 2. Create seed which knows about the peer - peerAddr := peerSwitch.NetAddress() - seed := testCreateSeed(dir, 2, []*p2p.NetAddress{peerAddr}, []*p2p.NetAddress{peerAddr}) - require.Nil(t, seed.Start()) - t.Cleanup(func() { _ = seed.Stop() }) - - // 3. create another peer with only seed configured. - secondPeer := testCreatePeerWithSeed(dir, 3, seed) - require.Nil(t, secondPeer.Start()) - t.Cleanup(func() { _ = secondPeer.Stop() }) - - // 4. check that the second peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 3*time.Second, 1) - - // 5. check that the second peer connects to the first peer immediately - assertPeersWithTimeout(t, []*p2p.Switch{secondPeer}, 10*time.Millisecond, 1*time.Second, 2) -} - -func TestPEXReactorSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 10 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreateDefaultPeer(dir, 1) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // 2. attemptDisconnects should not disconnect because of wait period - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 3. 
attemptDisconnects should disconnect after wait period - pexR.attemptDisconnects() - assert.Equal(t, 0, sw.Peers().Size()) -} - -func TestPEXReactorDoesNotDisconnectFromPersistentPeerInSeedMode(t *testing.T) { - // directory to store address books - dir := tempDir(t) - - pexRConfig := &ReactorConfig{SeedMode: true, SeedDisconnectWaitPeriod: 1 * time.Millisecond} - pexR, book := createReactor(t, pexRConfig) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - require.NoError(t, sw.Start()) - t.Cleanup(func() { _ = sw.Stop() }) - - assert.Zero(t, sw.Peers().Size()) - - peerSwitch := testCreatePeerWithConfig(dir, 1, pexRConfig) - require.NoError(t, peerSwitch.Start()) - t.Cleanup(func() { _ = peerSwitch.Stop() }) - - require.NoError(t, sw.AddPersistentPeers([]string{peerSwitch.NetAddress().String()})) - - // 1. Test crawlPeers dials the peer - pexR.crawlPeers([]*p2p.NetAddress{peerSwitch.NetAddress()}) - assert.Equal(t, 1, sw.Peers().Size()) - assert.True(t, sw.Peers().Has(peerSwitch.NodeInfo().ID())) - - // sleep for SeedDisconnectWaitPeriod - time.Sleep(pexRConfig.SeedDisconnectWaitPeriod + 1*time.Millisecond) - - // 2. attemptDisconnects should not disconnect because the peer is persistent - pexR.attemptDisconnects() - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { - // directory to store address books - pexR, book := createReactor(t, &ReactorConfig{SeedMode: true}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - // No need to start sw since crawlPeers is called manually here. - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - require.NoError(t, book.AddAddress(addr, addr)) - - assert.True(t, book.HasAddress(addr)) - - // imitate maxAttemptsToDial reached - pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) - pexR.crawlPeers([]*p2p.NetAddress{addr}) - - assert.False(t, book.HasAddress(addr)) -} - -// connect a peer to a seed, wait a bit, then stop it. -// this should give it time to request addrs and for the seed -// to call FlushStop, and allows us to test calling Stop concurrently -// with FlushStop. Before a fix, this non-deterministically reproduced -// https://github.com/tendermint/tendermint/issues/3231. 
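// The test below works around that race by polling rather than synchronizing.
// A minimal sketch of the poll-until-condition-or-deadline pattern it uses
// (standalone; cond is a hypothetical placeholder):
package sketch

import "time"

// waitFor polls cond every tick until it returns true or deadline elapses.
func waitFor(cond func() bool, tick, deadline time.Duration) bool {
	expired := time.After(deadline)
	for {
		if cond() {
			return true
		}
		select {
		case <-expired:
			return false
		case <-time.After(tick):
		}
	}
}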
-func TestPEXReactorSeedModeFlushStop(t *testing.T) { - t.Skip("flaky test, will be replaced by new P2P stack") - N := 2 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir := tempDir(t) - - books := make([]AddrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - config := &ReactorConfig{} - if i == 0 { - // first one is a seed node - config = &ReactorConfig{SeedMode: true} - } - r := NewReactor(books[i], config) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }, logger) - } - - for _, sw := range switches { - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - reactor := switches[0].Reactors()["pex"].(*Reactor) - peerID := switches[1].NodeInfo().ID() - - assert.NoError(t, switches[1].DialPeerWithAddress(switches[0].NetAddress())) - - // sleep up to a second while waiting for the peer to send us a message. - // this isn't perfect since it's possible the peer sends us a msg and we FlushStop - // before this loop catches it. but non-deterministically it works pretty well. - for i := 0; i < 1000; i++ { - v := reactor.lastReceivedRequests.Get(string(peerID)) - if v != nil { - break - } - time.Sleep(time.Millisecond) - } - - // by now the FlushStop should have happened. Try stopping the peer. - // it should be safe to do this. - peers := switches[0].Peers().List() - for _, peer := range peers { - err := peer.Stop() - require.NoError(t, err) - } - - // stop the switches - for _, s := range switches { - err := s.Stop() - require.NoError(t, err) - } -} - -func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { - peer := p2p.CreateRandomPeer(false) - - pexR, book := createReactor(t, &ReactorConfig{}) - book.AddPrivateIDs([]string{string(peer.NodeInfo().ID())}) - - // we have to send a request to receive responses - pexR.RequestAddrs(peer) - - size := book.Size() - na, err := peer.NodeInfo().NetAddress() - require.NoError(t, err) - - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) - pexR.Receive(PexChannel, peer, msg) - assert.Equal(t, size, book.Size()) - - pexR.AddPeer(peer) - assert.Equal(t, size, book.Size()) -} - -func TestPEXReactorDialPeer(t *testing.T) { - pexR, book := createReactor(t, &ReactorConfig{}) - sw := createSwitchAndAddReactors(pexR) - - sw.SetAddrBook(book) - - peer := mock.NewPeer(nil) - addr := peer.SocketAddr() - - assert.Equal(t, 0, pexR.AttemptsToDial(addr)) - - // 1st unsuccessful attempt - err := pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - // 2nd unsuccessful attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - // must be skipped because it is too early - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - if !testing.Short() { - time.Sleep(3 * time.Second) - - // 3rd attempt - err = pexR.dialPeer(addr) - require.Error(t, err) - - assert.Equal(t, 2, pexR.AttemptsToDial(addr)) - } -} - -func assertPeersWithTimeout( - t *testing.T, - switches []*p2p.Switch, - checkPeriod, timeout time.Duration, - nPeers int, -) { - var ( - ticker = time.NewTicker(checkPeriod) - remaining = timeout 
- ) - - for { - select { - case <-ticker.C: - // check peers are connected - allGood := true - for _, s := range switches { - outbound, inbound, _ := s.NumPeers() - if outbound+inbound < nPeers { - allGood = false - break - } - } - remaining -= checkPeriod - if remaining < 0 { - remaining = 0 - } - if allGood { - return - } - case <-time.After(remaining): - numPeersStr := "" - for i, s := range switches { - outbound, inbound, _ := s.NumPeers() - numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound) - } - t.Errorf( - "expected all switches to be connected to at least %d peer(s) (switches: %s)", - nPeers, numPeersStr, - ) - return - } - } -} - -// Creates a peer with the provided config -func testCreatePeerWithConfig(dir string, id int, config *ReactorConfig) *p2p.Switch { - peer := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", id)), false) - book.SetLogger(log.TestingLogger()) - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor( - book, - config, - ) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return peer -} - -// Creates a peer with the default config -func testCreateDefaultPeer(dir string, id int) *p2p.Switch { - return testCreatePeerWithConfig(dir, id, &ReactorConfig{}) -} - -// Creates a seed which knows about the provided addresses / source address pairs. -// Starting and stopping the seed is left to the caller -func testCreateSeed(dir string, id int, knownAddrs, srcAddrs []*p2p.NetAddress) *p2p.Switch { - seed := p2p.MakeSwitch( - cfg, - id, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, "addrbookSeed.json"), false) - book.SetLogger(log.TestingLogger()) - for j := 0; j < len(knownAddrs); j++ { - book.AddAddress(knownAddrs[j], srcAddrs[j]) // nolint:errcheck // ignore for tests - book.MarkGood(knownAddrs[j].ID) - } - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewReactor(book, &ReactorConfig{}) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - log.TestingLogger(), - ) - return seed -} - -// Creates a peer which knows about the provided seed. 
-// Starting and stopping the peer is left to the caller -func testCreatePeerWithSeed(dir string, id int, seed *p2p.Switch) *p2p.Switch { - conf := &ReactorConfig{ - Seeds: []string{seed.NetAddress().String()}, - } - return testCreatePeerWithConfig(dir, id, conf) -} - -func createReactor(t *testing.T, conf *ReactorConfig) (r *Reactor, book AddrBook) { - // directory to store address book - book = NewAddrBook(filepath.Join(tempDir(t), "addrbook.json"), true) - book.SetLogger(log.TestingLogger()) - - r = NewReactor(book, conf) - r.SetLogger(log.TestingLogger()) - return -} - -func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - for _, r := range reactors { - sw.AddReactor(r.String(), r) - } - return sw - }, log.TestingLogger()) - return sw -} - -func TestPexVectors(t *testing.T) { - addr := tmp2p.PexAddress{ - ID: "1", - IP: "127.0.0.1", - Port: 9090, - } - - testCases := []struct { - testName string - msg proto.Message - expBytes string - }{ - {"PexRequest", &tmp2p.PexRequest{}, "0a00"}, - {"PexAddrs", &tmp2p.PexResponse{Addresses: []tmp2p.PexAddress{addr}}, "12130a110a013112093132372e302e302e31188247"}, - } - - for _, tc := range testCases { - tc := tc - - bz := mustEncode(tc.msg) - - require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName) - } -} - -// FIXME: This function is used in place of testing.TB.TempDir() -// as the latter seems to cause test cases to fail when it is -// unable to remove the temporary directory once the test case -// execution terminates. This seems to happen often with pex -// reactor test cases. -// -// References: -// https://github.com/tendermint/tendermint/pull/5733 -// https://github.com/tendermint/tendermint/issues/5732 -func tempDir(t *testing.T) string { - t.Helper() - dir, err := ioutil.TempDir("", "") - require.NoError(t, err) - t.Cleanup(func() { _ = os.RemoveAll(dir) }) - return dir -} diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 8cff2f95b..645cc19e1 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -17,13 +17,29 @@ import ( ) var ( - _ service.Service = (*ReactorV2)(nil) + _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*protop2p.PexMessage)(nil) ) // TODO: Consolidate with params file. // See https://github.com/tendermint/tendermint/issues/6371 const ( + // PexChannel is a channel for PEX messages + PexChannel = 0x00 + + // over-estimate of max NetAddress size + // hexID (40) + IP (16) + Port (2) + Name (100) ... + // NOTE: dont use massive DNS name .. + maxAddressSize = 256 + + // max addresses returned by GetSelection + // NOTE: this must match "maxMsgSize" + maxGetSelection = 250 + + // NOTE: amplification factor! + // small request results in up to maxMsgSize response + maxMsgSize = maxAddressSize * maxGetSelection + // the minimum time one peer can send another request to the same peer minReceiveRequestInterval = 100 * time.Millisecond @@ -46,22 +62,17 @@ const ( // within each reactor (as they are now) or, considering that the reactor doesn't // really need to care about the channel descriptors, if they should be housed // in the node module. 
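// One way the TODO above could play out, sketched only (not part of this
// change): a single table of descriptors in the node module, keyed by channel
// ID, using the PEX values from this file (ID 0x00, priority 1, send queue
// capacity 10, receive capacity maxAddressSize*maxGetSelection = 256*250).
package sketch

// descriptor stands in for conn.ChannelDescriptor.
type descriptor struct {
	ID                  byte
	Priority            int
	SendQueueCapacity   int
	RecvMessageCapacity int
}

// channelDescriptors would let the node wire up every reactor's channels
// from one place instead of each reactor defining its own.
var channelDescriptors = map[byte]descriptor{
	0x00: { // PEX
		ID:                  0x00,
		Priority:            1,
		SendQueueCapacity:   10,
		RecvMessageCapacity: 256 * 250,
	},
}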
-func ChannelDescriptor() conn.ChannelDescriptor { - return conn.ChannelDescriptor{ +func ChannelDescriptor() *conn.ChannelDescriptor { + return &conn.ChannelDescriptor{ ID: PexChannel, + MessageType: new(protop2p.PexMessage), Priority: 1, SendQueueCapacity: 10, RecvMessageCapacity: maxMsgSize, - RecvBufferCapacity: 32, - MaxSendBytes: 200, + RecvBufferCapacity: 128, } } -// ReactorV2 is a PEX reactor for the new P2P stack. The legacy reactor -// is Reactor. -// -// FIXME: Rename this when Reactor is removed, and consider moving to p2p/. -// // The peer exchange or PEX reactor supports the peer manager by sending // requests to other peers for addresses that can be given to the peer manager // and at the same time advertises addresses to peers that need more. @@ -70,7 +81,7 @@ func ChannelDescriptor() conn.ChannelDescriptor { // increasing the interval between each request. It tracks connected peers via // a linked list, sending a request to the node at the front of the list and // adding it to the back of the list once a response is received. -type ReactorV2 struct { +type Reactor struct { service.BaseService peerManager *p2p.PeerManager @@ -109,14 +120,14 @@ type ReactorV2 struct { } // NewReactor returns a reference to a new reactor. -func NewReactorV2( +func NewReactor( logger log.Logger, peerManager *p2p.PeerManager, pexCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, -) *ReactorV2 { +) *Reactor { - r := &ReactorV2{ + r := &Reactor{ peerManager: peerManager, pexCh: pexCh, peerUpdates: peerUpdates, @@ -134,7 +145,7 @@ func NewReactorV2( // envelopes on each. In addition, it also listens for peer updates and handles // messages on that p2p channel accordingly. The caller must be sure to execute // OnStop to ensure the outbound p2p Channels are closed. -func (r *ReactorV2) OnStart() error { +func (r *Reactor) OnStart() error { go r.processPexCh() go r.processPeerUpdates() return nil @@ -142,7 +153,7 @@ func (r *ReactorV2) OnStart() error { // OnStop stops the reactor by signaling to all spawned goroutines to exit and // blocking until they all exit. -func (r *ReactorV2) OnStop() { +func (r *Reactor) OnStop() { // Close closeCh to signal to all spawned goroutines to gracefully exit. All // p2p Channels should execute Close(). close(r.closeCh) @@ -156,7 +167,7 @@ func (r *ReactorV2) OnStop() { // processPexCh implements a blocking event loop where we listen for p2p // Envelope messages from the pexCh. -func (r *ReactorV2) processPexCh() { +func (r *Reactor) processPexCh() { defer r.pexCh.Close() for { @@ -186,7 +197,7 @@ func (r *ReactorV2) processPexCh() { // processPeerUpdates initiates a blocking process where we listen for and handle // PeerUpdate messages. When the reactor is stopped, we will catch the signal and // close the p2p PeerUpdatesCh gracefully. -func (r *ReactorV2) processPeerUpdates() { +func (r *Reactor) processPeerUpdates() { defer r.peerUpdates.Close() for { @@ -202,7 +213,7 @@ func (r *ReactorV2) processPeerUpdates() { } // handlePexMessage handles envelopes sent from peers on the PexChannel. -func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { +func (r *Reactor) handlePexMessage(envelope p2p.Envelope) error { logger := r.Logger.With("peer", envelope.From) switch msg := envelope.Message.(type) { @@ -321,7 +332,7 @@ func (r *ReactorV2) handlePexMessage(envelope p2p.Envelope) error { // // FIXME: We may want to cache and parallelize this, but for now we'll just rely // on the operating system to cache it for us. 
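// The FIXME above mentions caching or parallelizing resolution. A standalone
// sketch of the parallel half (plain net.LookupIP stands in for the reactor's
// address resolution; names are hypothetical):
package sketch

import (
	"net"
	"sync"
)

// resolveAll looks up every host concurrently and returns whatever resolved;
// unresolvable hosts are skipped, much as resolve below skips failed addresses.
func resolveAll(hosts []string) map[string][]net.IP {
	var (
		mtx sync.Mutex
		wg  sync.WaitGroup
		out = make(map[string][]net.IP, len(hosts))
	)
	for _, host := range hosts {
		wg.Add(1)
		go func(h string) {
			defer wg.Done()
			ips, err := net.LookupIP(h)
			if err != nil {
				return
			}
			mtx.Lock()
			out[h] = ips
			mtx.Unlock()
		}(host)
	}
	wg.Wait()
	return out
}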
-func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
+func (r *Reactor) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
 	limit := len(addresses)
 	pexAddresses := make([]protop2p.PexAddress, 0, limit)
 
@@ -364,7 +375,7 @@ func (r *ReactorV2) resolve(addresses []p2p.NodeAddress) []protop2p.PexAddress {
 
 // handleMessage handles an Envelope sent from a peer on a specific p2p Channel.
 // It will handle errors and any possible panics gracefully. A caller can handle
 // any error returned by sending a PeerError on the respective channel.
-func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
+func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
 	defer func() {
 		if e := recover(); e != nil {
 			err = fmt.Errorf("panic in processing message: %v", e)
@@ -391,7 +402,7 @@ func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (er
 
 // processPeerUpdate processes a PeerUpdate. For added peers, PeerStatusUp, we
 // send a request for addresses.
-func (r *ReactorV2) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
+func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
 	r.Logger.Debug("received PEX peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status)
 
 	r.mtx.Lock()
@@ -408,7 +419,7 @@
 	}
 }
 
-func (r *ReactorV2) waitUntilNextRequest() <-chan time.Time {
+func (r *Reactor) waitUntilNextRequest() <-chan time.Time {
 	return time.After(time.Until(r.nextRequestTime))
 }
 
@@ -416,7 +427,7 @@
 // peer a request for more peer addresses. The function then moves the
 // peer into the requestsSent bucket and calculates when the next request
 // time should be
-func (r *ReactorV2) sendRequestForPeers() {
+func (r *Reactor) sendRequestForPeers() {
 	r.mtx.Lock()
 	defer r.mtx.Unlock()
 	if len(r.availablePeers) == 0 {
@@ -464,7 +475,7 @@
 // new nodes will plummet to a very small number, meaning the interval expands
 // to its upper bound.
 // CONTRACT: Must use a write lock as nextRequestTime is updated
-func (r *ReactorV2) calculateNextRequestTime() {
+func (r *Reactor) calculateNextRequestTime() {
 	// check if the peer store is full. If so then there is no need
 	// to send peer requests too often
 	if ratio := r.peerManager.PeerRatio(); ratio >= 0.95 {
@@ -500,7 +511,7 @@
 	r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio))
 }
 
-func (r *ReactorV2) markPeerRequest(peer types.NodeID) error {
+func (r *Reactor) markPeerRequest(peer types.NodeID) error {
 	r.mtx.Lock()
 	defer r.mtx.Unlock()
 	if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok {
@@ -513,7 +524,7 @@
 	return nil
 }
 
-func (r *ReactorV2) markPeerResponse(peer types.NodeID) error {
+func (r *Reactor) markPeerResponse(peer types.NodeID) error {
 	r.mtx.Lock()
 	defer r.mtx.Unlock()
 	// check if a request to this peer was sent
@@ -530,7 +541,7 @@
 
 // all addresses must use a MCONN protocol for the peer to be considered part of the
 // legacy p2p pex system
-func (r *ReactorV2) isLegacyPeer(peer types.NodeID) bool {
+func (r *Reactor) isLegacyPeer(peer types.NodeID) bool {
 	for _, addr := range r.peerManager.Addresses(peer) {
 		if addr.Protocol != p2p.MConnProtocol {
 			return false
diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go
index a5acb0d5e..b7e1a01c3 100644
--- a/internal/p2p/pex/reactor_test.go
+++ b/internal/p2p/pex/reactor_test.go
@@ -14,7 +14,7 @@ import (
 	"github.com/tendermint/tendermint/internal/p2p/p2ptest"
 	"github.com/tendermint/tendermint/internal/p2p/pex"
 	"github.com/tendermint/tendermint/libs/log"
-	proto "github.com/tendermint/tendermint/proto/tendermint/p2p"
+	p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -45,7 +45,7 @@ func TestReactorBasic(t *testing.T) {
 	// assert that when a mock node sends a request it receives a response (and
 	// the correct one)
 	testNet.sendRequest(t, firstNode, secondNode, true)
-	testNet.listenForResponse(t, secondNode, firstNode, shortWait, []proto.PexAddressV2(nil))
+	testNet.listenForResponse(t, secondNode, firstNode, shortWait, []p2pproto.PexAddressV2(nil))
 }
 
 func TestReactorConnectFullNetwork(t *testing.T) {
@@ -71,17 +71,17 @@ func TestReactorSendsRequestsTooOften(t *testing.T) {
 
 	r.pexInCh <- p2p.Envelope{
 		From:    badNode,
-		Message: &proto.PexRequestV2{},
+		Message: &p2pproto.PexRequestV2{},
 	}
 
 	resp := <-r.pexOutCh
-	msg, ok := resp.Message.(*proto.PexResponseV2)
+	msg, ok := resp.Message.(*p2pproto.PexResponseV2)
 	require.True(t, ok)
 	require.Empty(t, msg.Addresses)
 
 	r.pexInCh <- p2p.Envelope{
 		From:    badNode,
-		Message: &proto.PexRequestV2{},
+		Message: &p2pproto.PexRequestV2{},
 	}
 
 	peerErr := <-r.pexErrCh
@@ -136,10 +136,10 @@ func TestReactorErrorsOnReceivingTooManyPeers(t *testing.T) {
 	require.NoError(t, err)
 	require.True(t, added)
 
-	addresses := make([]proto.PexAddressV2, 101)
+	addresses := make([]p2pproto.PexAddressV2, 101)
 	for i := 0; i < len(addresses); i++ {
 		nodeAddress := p2p.NodeAddress{Protocol: p2p.MemoryProtocol, NodeID: randomNodeID(t)}
-		addresses[i] = proto.PexAddressV2{
+		addresses[i] = p2pproto.PexAddressV2{
 			URL: nodeAddress.String(),
 		}
 	}
@@ -152,12 +152,12 @@
 	select {
 	// wait for a request and then send a response with too many addresses
 	case req := <-r.pexOutCh:
-		if _, ok := req.Message.(*proto.PexRequestV2); !ok {
+		if _, ok := req.Message.(*p2pproto.PexRequestV2); !ok {
 			t.Fatal("expected v2 pex request")
 		}
 		r.pexInCh <- p2p.Envelope{
 			From: peer.NodeID,
-			Message: &proto.PexResponseV2{
+			Message: &p2pproto.PexResponseV2{
 				Addresses: addresses,
 			},
 		}
@@ -272,7 +272,7 @@ func TestReactorIntegrationWithLegacyHandleResponse(t *testing.T) {
 }
 
 type singleTestReactor struct {
-	reactor  *pex.ReactorV2
+	reactor  *pex.Reactor
 	pexInCh  chan p2p.Envelope
 	pexOutCh chan p2p.Envelope
 	pexErrCh chan p2p.PeerError
@@ -290,7 +290,7 @@ func setupSingle(t *testing.T) *singleTestReactor {
 	pexErrCh := make(chan p2p.PeerError, chBuf)
 	pexCh := p2p.NewChannel(
 		p2p.ChannelID(pex.PexChannel),
-		new(proto.PexMessage),
+		new(p2pproto.PexMessage),
 		pexInCh,
 		pexOutCh,
 		pexErrCh,
@@ -301,7 +301,7 @@
 	peerManager, err := p2p.NewPeerManager(nodeID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
 	require.NoError(t, err)
 
-	reactor := pex.NewReactorV2(log.TestingLogger(), peerManager, pexCh, peerUpdates)
+	reactor := pex.NewReactor(log.TestingLogger(), peerManager, pexCh, peerUpdates)
 	require.NoError(t, reactor.Start())
 	t.Cleanup(func() {
 		err := reactor.Stop()
@@ -327,7 +327,7 @@ type reactorTestSuite struct {
 	network *p2ptest.Network
 	logger  log.Logger
 
-	reactors    map[types.NodeID]*pex.ReactorV2
+	reactors    map[types.NodeID]*pex.Reactor
 	pexChannels map[types.NodeID]*p2p.Channel
 
 	peerChans   map[types.NodeID]chan p2p.PeerUpdate
@@ -370,7 +370,7 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite {
 	rts := &reactorTestSuite{
 		logger:      log.TestingLogger().With("testCase", t.Name()),
 		network:     p2ptest.MakeNetwork(t, networkOpts),
-		reactors:    make(map[types.NodeID]*pex.ReactorV2, realNodes),
+		reactors:    make(map[types.NodeID]*pex.Reactor, realNodes),
 		pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes),
 		peerChans:   make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes),
 		peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes),
@@ -380,9 +380,7 @@
 
 	// NOTE: we don't assert that the channels get drained after stopping the
 	// reactor
-	rts.pexChannels = rts.network.MakeChannelsNoCleanup(
-		t, pex.ChannelDescriptor(), new(proto.PexMessage), chBuf,
-	)
+	rts.pexChannels = rts.network.MakeChannelsNoCleanup(t, pex.ChannelDescriptor())
 
 	idx := 0
 	for nodeID := range rts.network.Nodes {
@@ -394,7 +392,7 @@
 		if idx < opts.MockNodes {
 			rts.mocks = append(rts.mocks, nodeID)
 		} else {
-			rts.reactors[nodeID] = pex.NewReactorV2(
+			rts.reactors[nodeID] = pex.NewReactor(
 				rts.logger.With("nodeID", nodeID),
 				rts.network.Nodes[nodeID].PeerManager,
 				rts.pexChannels[nodeID],
@@ -446,13 +444,11 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) {
 		})
 		r.network.Nodes[node.NodeID] = node
 		nodeID := node.NodeID
-		r.pexChannels[nodeID] = node.MakeChannelNoCleanup(
-			t, pex.ChannelDescriptor(), new(proto.PexMessage), r.opts.BufferSize,
-		)
+		r.pexChannels[nodeID] = node.MakeChannelNoCleanup(t, pex.ChannelDescriptor())
 		r.peerChans[nodeID] = make(chan p2p.PeerUpdate, r.opts.BufferSize)
 		r.peerUpdates[nodeID] = p2p.NewPeerUpdates(r.peerChans[nodeID], r.opts.BufferSize)
 		r.network.Nodes[nodeID].PeerManager.Register(r.peerUpdates[nodeID])
-		r.reactors[nodeID] = pex.NewReactorV2(
+		r.reactors[nodeID] = pex.NewReactor(
 			r.logger.With("nodeID", nodeID),
 			r.network.Nodes[nodeID].PeerManager,
 			r.pexChannels[nodeID],
@@ -488,11 +484,11 @@ func (r *reactorTestSuite) listenForRequest(t *testing.T, fromNode, toNode int,
 	r.logger.Info("Listening for request", "from", fromNode, "to", toNode)
 	to, from := r.checkNodePair(t, toNode, fromNode)
 	conditional := func(msg p2p.Envelope) bool {
-		_, ok := msg.Message.(*proto.PexRequestV2)
+		_, ok := msg.Message.(*p2pproto.PexRequestV2)
 		return ok && msg.From == from
 	}
 	assertion := func(t *testing.T, msg p2p.Envelope) bool {
-		require.Equal(t, &proto.PexRequestV2{}, msg.Message)
+		require.Equal(t, &p2pproto.PexRequestV2{}, msg.Message)
 		return true
 	}
 	r.listenFor(t, to, conditional, assertion, waitPeriod)
@@ -507,11 +503,11 @@ func (r *reactorTestSuite) pingAndlistenForNAddresses(
 	r.logger.Info("Listening for addresses", "from", fromNode, "to", toNode)
 	to, from := r.checkNodePair(t, toNode, fromNode)
 	conditional := func(msg p2p.Envelope) bool {
-		_, ok := msg.Message.(*proto.PexResponseV2)
+		_, ok := msg.Message.(*p2pproto.PexResponseV2)
 		return ok && msg.From == from
 	}
 	assertion := func(t *testing.T, msg p2p.Envelope) bool {
-		m, ok := msg.Message.(*proto.PexResponseV2)
+		m, ok := msg.Message.(*p2pproto.PexResponseV2)
 		if !ok {
 			require.Fail(t, "expected pex response v2")
 			return true
@@ -534,17 +530,17 @@ func (r *reactorTestSuite) listenForResponse(
 	t *testing.T,
 	fromNode, toNode int,
 	waitPeriod time.Duration,
-	addresses []proto.PexAddressV2,
+	addresses []p2pproto.PexAddressV2,
 ) {
 	r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
 	to, from := r.checkNodePair(t, toNode, fromNode)
 	conditional := func(msg p2p.Envelope) bool {
-		_, ok := msg.Message.(*proto.PexResponseV2)
+		_, ok := msg.Message.(*p2pproto.PexResponseV2)
 		r.logger.Info("message", msg, "ok", ok)
 		return ok && msg.From == from
 	}
 	assertion := func(t *testing.T, msg p2p.Envelope) bool {
-		require.Equal(t, &proto.PexResponseV2{Addresses: addresses}, msg.Message)
+		require.Equal(t, &p2pproto.PexResponseV2{Addresses: addresses}, msg.Message)
 		return true
 	}
 	r.listenFor(t, to, conditional, assertion, waitPeriod)
@@ -554,16 +550,16 @@ func (r *reactorTestSuite) listenForLegacyResponse(
 	t *testing.T,
 	fromNode, toNode int,
 	waitPeriod time.Duration,
-	addresses []proto.PexAddress,
+	addresses []p2pproto.PexAddress,
 ) {
 	r.logger.Info("Listening for response", "from", fromNode, "to", toNode)
 	to, from := r.checkNodePair(t, toNode, fromNode)
 	conditional := func(msg p2p.Envelope) bool {
-		_, ok := msg.Message.(*proto.PexResponse)
+		_, ok := msg.Message.(*p2pproto.PexResponse)
 		return ok && msg.From == from
 	}
 	assertion := func(t *testing.T, msg p2p.Envelope) bool {
-		require.Equal(t, &proto.PexResponse{Addresses: addresses}, msg.Message)
+		require.Equal(t, &p2pproto.PexResponse{Addresses: addresses}, msg.Message)
 		return true
 	}
 	r.listenFor(t, to, conditional, assertion, waitPeriod)
@@ -595,26 +591,26 @@ func (r *reactorTestSuite) listenForPeerUpdate(
 	}
 }
 
-func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []proto.PexAddressV2 {
-	addresses := make([]proto.PexAddressV2, len(nodes))
+func (r *reactorTestSuite) getV2AddressesFor(nodes []int) []p2pproto.PexAddressV2 {
+	addresses := make([]p2pproto.PexAddressV2, len(nodes))
 	for idx, node := range nodes {
 		nodeID := r.nodes[node]
-		addresses[idx] = proto.PexAddressV2{
+		addresses[idx] = p2pproto.PexAddressV2{
 			URL: r.network.Nodes[nodeID].NodeAddress.String(),
 		}
 	}
 	return addresses
 }
 
-func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []proto.PexAddress {
-	addresses := make([]proto.PexAddress, len(nodes))
+func (r *reactorTestSuite) getAddressesFor(t *testing.T, nodes []int) []p2pproto.PexAddress {
+	addresses := make([]p2pproto.PexAddress, len(nodes))
 	for idx, node := range nodes {
 		nodeID := r.nodes[node]
 		nodeAddrs := r.network.Nodes[nodeID].NodeAddress
 		endpoints, err := nodeAddrs.Resolve(context.Background())
 		require.NoError(t, err)
 		require.Len(t, endpoints, 1)
-		addresses[idx] = proto.PexAddress{
+		addresses[idx] = p2pproto.PexAddress{
 			ID:   string(nodeAddrs.NodeID),
 			IP:   endpoints[0].IP.String(),
 			Port: uint32(endpoints[0].Port),
@@ -628,12 +624,12 @@ func (r *reactorTestSuite) sendRequest(t *testing.T, fromNode, toNode int, v2 bo
 	if v2 {
 		r.pexChannels[from].Out <- p2p.Envelope{
 			To:      to,
-			Message: &proto.PexRequestV2{},
+			Message: &p2pproto.PexRequestV2{},
 		}
 	} else {
 		r.pexChannels[from].Out <- p2p.Envelope{
 			To:      to,
-			Message: &proto.PexRequest{},
+			Message: &p2pproto.PexRequest{},
 		}
 	}
 }
@@ -649,7 +645,7 @@ func (r *reactorTestSuite) sendResponse(
 		addrs := r.getV2AddressesFor(withNodes)
 		r.pexChannels[from].Out <- p2p.Envelope{
 			To: to,
-			Message: &proto.PexResponseV2{
+			Message: &p2pproto.PexResponseV2{
 				Addresses: addrs,
 			},
 		}
@@ -657,7 +653,7 @@
 		addrs := r.getAddressesFor(t, withNodes)
 		r.pexChannels[from].Out <- p2p.Envelope{
 			To: to,
-			Message: &proto.PexResponse{
+			Message: &p2pproto.PexResponse{
 				Addresses: addrs,
 			},
 		}
@@ -764,8 +760,8 @@ func (r *reactorTestSuite) connectPeers(t *testing.T, sourceNode, targetNode int
 }
 
 // nolint: unused
-func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto.PexAddress {
-	var addresses []proto.PexAddress
+func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []p2pproto.PexAddress {
+	var addresses []p2pproto.PexAddress
 	for _, i := range nodeIndices {
 		if i < len(r.nodes) {
 			require.Fail(t, "index for pex address is greater than number of nodes")
@@ -777,7 +773,7 @@
 		require.NoError(t, err)
 		for _, endpoint := range endpoints {
 			if endpoint.IP != nil {
-				addresses = append(addresses, proto.PexAddress{
+				addresses = append(addresses, p2pproto.PexAddress{
 					ID:   string(nodeAddrs.NodeID),
 					IP:   endpoint.IP.String(),
 					Port: uint32(endpoint.Port),
diff --git a/internal/p2p/pqueue.go b/internal/p2p/pqueue.go
index e4560c7bd..e0e812cf5 100644
--- a/internal/p2p/pqueue.go
+++ b/internal/p2p/pqueue.go
@@ -71,7 +71,7 @@ type pqScheduler struct {
 	size         uint
 	sizes        map[uint]uint // cumulative priority sizes
 	pq           *priorityQueue
-	chDescs      []ChannelDescriptor
+	chDescs      []*ChannelDescriptor
 	capacity     uint
 	chPriorities map[ChannelID]uint
 
@@ -84,12 +84,12 @@ func newPQScheduler(
 	logger log.Logger,
 	m *Metrics,
-	chDescs []ChannelDescriptor,
+	chDescs []*ChannelDescriptor,
 	enqueueBuf, dequeueBuf, capacity uint,
 ) *pqScheduler {
 	// copy each ChannelDescriptor and sort them by ascending channel priority
-	chDescsCopy := make([]ChannelDescriptor, len(chDescs))
+	chDescsCopy := make([]*ChannelDescriptor, len(chDescs))
 	copy(chDescsCopy, chDescs)
 	sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority < chDescsCopy[j].Priority })
 
@@ -99,7 +99,7 @@ func newPQScheduler(
 	)
 
 	for _, chDesc := range chDescsCopy {
-		chID := ChannelID(chDesc.ID)
+		chID := chDesc.ID
 		chPriorities[chID] = uint(chDesc.Priority)
 		sizes[uint(chDesc.Priority)] = 0
 	}
@@ -167,13 +167,12 @@ func (s *pqScheduler) process() {
 				timestamp: time.Now().UTC(),
 			}
 
-			s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size))
-
 			// enqueue
 
 			// Check if we have sufficient capacity to simply enqueue the incoming
 			// Envelope.
 			if s.size+pqEnv.size <= s.capacity {
+				s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size))
 				// enqueue the incoming Envelope
 				s.push(pqEnv)
 			} else {
@@ -213,6 +212,8 @@
 							"capacity", s.capacity,
 						)
 
+						s.metrics.PeerPendingSendBytes.With("peer_id", string(pqEnvTmp.envelope.To)).Add(float64(-pqEnvTmp.size))
+
 						// dequeue/drop from the priority queue
 						heap.Remove(s.pq, pqEnvTmp.index)
 
@@ -257,6 +258,8 @@
 			s.metrics.PeerSendBytesTotal.With(
 				"chID", chIDStr,
 				"peer_id", string(pqEnv.envelope.To)).Add(float64(pqEnv.size))
+			s.metrics.PeerPendingSendBytes.With(
+				"peer_id", string(pqEnv.envelope.To)).Add(float64(-pqEnv.size))
 			select {
 			case s.dequeueCh <- pqEnv.envelope:
 			case <-s.closer.Done():
diff --git a/internal/p2p/pqueue_test.go b/internal/p2p/pqueue_test.go
index ddb7addbe..ffa7e39a8 100644
--- a/internal/p2p/pqueue_test.go
+++ b/internal/p2p/pqueue_test.go
@@ -4,13 +4,16 @@ import (
 	"testing"
 	"time"
 
+	gogotypes "github.com/gogo/protobuf/types"
 	"github.com/tendermint/tendermint/libs/log"
 )
 
+type testMessage = gogotypes.StringValue
+
 func TestCloseWhileDequeueFull(t *testing.T) {
 	enqueueLength := 5
-	chDescs := []ChannelDescriptor{
-		{ID: 0x01, Priority: 1, MaxSendBytes: 4},
+	chDescs := []*ChannelDescriptor{
+		{ID: 0x01, Priority: 1},
 	}
 	pqueue := newPQScheduler(log.NewNopLogger(), NopMetrics(), chDescs, uint(enqueueLength), 1, 120)
 
diff --git a/internal/p2p/router.go b/internal/p2p/router.go
index 1171566d1..6c4694624 100644
--- a/internal/p2p/router.go
+++ b/internal/p2p/router.go
@@ -21,8 +21,7 @@ import (
 
 const queueBufferDefault = 32
 
-// ChannelID is an arbitrary channel ID.
-type ChannelID uint16
+const dialRandomizerIntervalMillisecond = 3000
 
 // Envelope contains a message with sender/receiver routing info.
 type Envelope struct {
@@ -131,8 +130,8 @@ type RouterOptions struct {
 	// no timeout.
 	HandshakeTimeout time.Duration
 
-	// QueueType must be "wdrr" (Weighed Deficit Round Robin), "priority", or
-	// "fifo". Defaults to "fifo".
+	// QueueType must be, "priority", or "fifo". Defaults to
+	// "fifo".
 	QueueType string
 
 	// MaxIncomingConnectionAttempts rate limits the number of incoming connection
@@ -174,7 +173,6 @@ type RouterOptions struct {
 const (
 	queueTypeFifo     = "fifo"
 	queueTypePriority = "priority"
-	queueTypeWDRR     = "wdrr"
 )
 
 // Validate validates router options.
@@ -182,8 +180,8 @@ func (o *RouterOptions) Validate() error {
 	switch o.QueueType {
 	case "":
 		o.QueueType = queueTypeFifo
-	case queueTypeFifo, queueTypeWDRR, queueTypePriority:
-		// passI me
+	case queueTypeFifo, queueTypePriority:
+		// pass
 	default:
 		return fmt.Errorf("queue type %q is not supported", o.QueueType)
 	}
@@ -251,7 +249,7 @@ type Router struct {
 	nodeInfo           types.NodeInfo
 	privKey            crypto.PrivKey
 	peerManager        *PeerManager
-	chDescs            []ChannelDescriptor
+	chDescs            []*ChannelDescriptor
 	transports         []Transport
 	connTracker        connectionTracker
 	protocolTransports map[Protocol]Transport
@@ -297,7 +295,7 @@ func NewRouter(
 			options.MaxIncomingConnectionAttempts,
 			options.IncomingConnectionWindow,
 		),
-		chDescs:            make([]ChannelDescriptor, 0),
+		chDescs:            make([]*ChannelDescriptor, 0),
 		transports:         transports,
 		protocolTransports: map[Protocol]Transport{},
 		peerManager:        peerManager,
@@ -345,17 +343,6 @@ func (r *Router) createQueueFactory() (func(int) queue, error) {
 			return q
 		}, nil
 
-	case queueTypeWDRR:
-		return func(size int) queue {
-			if size%2 != 0 {
-				size++
-			}
-
-			q := newWDRRScheduler(r.logger, r.metrics, r.chDescs, uint(size)/2, uint(size)/2, defaultCapacity)
-			q.start()
-			return q
-		}, nil
-
 	default:
 		return nil, fmt.Errorf("cannot construct queue of type %q", r.options.QueueType)
 	}
@@ -367,19 +354,21 @@
 // implement Wrapper to automatically (un)wrap multiple message types in a
 // wrapper message. The caller may provide a size to make the channel buffered,
 // which internally makes the inbound, outbound, and error channel buffered.
-func (r *Router) OpenChannel(chDesc ChannelDescriptor, messageType proto.Message, size int) (*Channel, error) {
+func (r *Router) OpenChannel(chDesc *ChannelDescriptor) (*Channel, error) {
 	r.channelMtx.Lock()
 	defer r.channelMtx.Unlock()
 
-	id := ChannelID(chDesc.ID)
+	id := chDesc.ID
 	if _, ok := r.channelQueues[id]; ok {
 		return nil, fmt.Errorf("channel %v already exists", id)
 	}
 	r.chDescs = append(r.chDescs, chDesc)
 
-	queue := r.queueFactory(size)
-	outCh := make(chan Envelope, size)
-	errCh := make(chan PeerError, size)
+	messageType := chDesc.MessageType
+
+	queue := r.queueFactory(chDesc.RecvBufferCapacity)
+	outCh := make(chan Envelope, chDesc.RecvBufferCapacity)
+	errCh := make(chan PeerError, chDesc.RecvBufferCapacity)
 	channel := NewChannel(id, messageType, queue.dequeue(), outCh, errCh)
 
 	var wrapper Wrapper
@@ -393,6 +382,10 @@
 	// add the channel to the nodeInfo if it's not already there.
 	r.nodeInfo.AddChannel(uint16(chDesc.ID))
 
+	for _, t := range r.transports {
+		t.AddChannelDescriptors([]*ChannelDescriptor{chDesc})
+	}
+
 	go func() {
 		defer func() {
 			r.channelMtx.Lock()
@@ -544,7 +537,7 @@ func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error {
 
 func (r *Router) dialSleep(ctx context.Context) {
 	if r.options.DialSleep == nil {
 		// nolint:gosec // G404: Use of weak random number generator
-		timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond)
+		timer := time.NewTimer(time.Duration(rand.Int63n(dialRandomizerIntervalMillisecond)) * time.Millisecond)
 		defer timer.Stop()
 
 		select {
@@ -620,7 +613,7 @@ func (r *Router) openConnection(ctx context.Context, conn Connection) {
 	// The Router should do the handshake and have a final ack/fail
 	// message to make sure both ends have accepted the connection, such
 	// that it can be coordinated with the peer manager.
-	peerInfo, _, err := r.handshakePeer(ctx, conn, "")
+	peerInfo, err := r.handshakePeer(ctx, conn, "")
 	switch {
 	case errors.Is(err, context.Canceled):
 		return
@@ -714,7 +707,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) {
 		return
 	}
 
-	peerInfo, _, err := r.handshakePeer(ctx, conn, address.NodeID)
+	peerInfo, err := r.handshakePeer(ctx, conn, address.NodeID)
 	switch {
 	case errors.Is(err, context.Canceled):
 		conn.Close()
@@ -809,7 +802,7 @@ func (r *Router) handshakePeer(
 	ctx context.Context,
 	conn Connection,
 	expectID types.NodeID,
-) (types.NodeInfo, crypto.PubKey, error) {
+) (types.NodeInfo, error) {
 
 	if r.options.HandshakeTimeout > 0 {
 		var cancel context.CancelFunc
@@ -819,27 +812,27 @@ func (r *Router) handshakePeer(
 
 	peerInfo, peerKey, err := conn.Handshake(ctx, r.nodeInfo, r.privKey)
 	if err != nil {
-		return peerInfo, peerKey, err
+		return peerInfo, err
 	}
 
 	if err = peerInfo.Validate(); err != nil {
-		return peerInfo, peerKey, fmt.Errorf("invalid handshake NodeInfo: %w", err)
+		return peerInfo, fmt.Errorf("invalid handshake NodeInfo: %w", err)
 	}
 
 	if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID {
-		return peerInfo, peerKey, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)",
+		return peerInfo, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)",
 			peerInfo.NodeID, types.NodeIDFromPubKey(peerKey))
 	}
 
 	if expectID != "" && expectID != peerInfo.NodeID {
-		return peerInfo, peerKey, fmt.Errorf("expected to connect with peer %q, got %q",
+		return peerInfo, fmt.Errorf("expected to connect with peer %q, got %q",
 			expectID, peerInfo.NodeID)
 	}
 
 	if err := r.nodeInfo.CompatibleWith(peerInfo); err != nil {
-		return peerInfo, peerKey, ErrRejected{
+		return peerInfo, ErrRejected{
 			err:            err,
 			id:             peerInfo.ID(),
 			isIncompatible: true,
 		}
 	}
 
-	return peerInfo, peerKey, nil
+	return peerInfo, nil
 }
 
 func (r *Router) runWithPeerMutex(fn func() error) error {
@@ -970,8 +963,7 @@ func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue)
 			continue
 		}
 
-		_, err = conn.SendMessage(envelope.channelID, bz)
-		if err != nil {
+		if err = conn.SendMessage(envelope.channelID, bz); err != nil {
 			return err
 		}
 
@@ -1023,13 +1015,11 @@ func (r *Router) NodeInfo() types.NodeInfo {
 
 // OnStart implements service.Service.
 func (r *Router) OnStart() error {
-	netAddr, _ := r.nodeInfo.NetAddress()
 	r.Logger.Info(
 		"starting router",
 		"node_id", r.nodeInfo.NodeID,
 		"channels", r.nodeInfo.Channels,
 		"listen_addr", r.nodeInfo.ListenAddr,
-		"net_addr", netAddr,
 	)
 
 	go r.dialPeers()
diff --git a/internal/p2p/router_init_test.go b/internal/p2p/router_init_test.go
index 3622c0cc1..b90d2a3dd 100644
--- a/internal/p2p/router_init_test.go
+++ b/internal/p2p/router_init_test.go
@@ -38,14 +38,6 @@ func TestRouter_ConstructQueueFactory(t *testing.T) {
 		require.True(t, ok)
 		defer q.close()
 	})
-	t.Run("WDRR", func(t *testing.T) {
-		opts := RouterOptions{QueueType: queueTypeWDRR}
-		r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts)
-		require.NoError(t, err)
-		q, ok := r.queueFactory(1).(*wdrrScheduler)
-		require.True(t, ok)
-		defer q.close()
-	})
 	t.Run("NonExistant", func(t *testing.T) {
 		opts := RouterOptions{QueueType: "fast"}
 		_, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts)
diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go
index 436e3f004..997f02a06 100644
--- a/internal/p2p/router_test.go
+++ b/internal/p2p/router_test.go
@@ -50,7 +50,7 @@ func TestRouter_Network(t *testing.T) {
 	network := p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: 8})
 	local := network.RandomNode()
 	peers := network.Peers(local.NodeID)
-	channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0)
+	channels := network.MakeChannels(t, chDesc)
 
 	network.Start(t)
 
@@ -119,25 +119,26 @@ func TestRouter_Channel_Basic(t *testing.T) {
 	})
 
 	// Opening a channel should work.
-	channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0)
+	channel, err := router.OpenChannel(chDesc)
 	require.NoError(t, err)
-	require.Contains(t, router.NodeInfo().Channels, chDesc.ID)
+	require.Contains(t, router.NodeInfo().Channels, byte(chDesc.ID))
 
 	// Opening the same channel again should fail.
-	_, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0)
+	_, err = router.OpenChannel(chDesc)
 	require.Error(t, err)
 
 	// Opening a different channel should work.
-	chDesc2 := p2p.ChannelDescriptor{ID: byte(2)}
-	_, err = router.OpenChannel(chDesc2, &p2ptest.Message{}, 0)
+	chDesc2 := &p2p.ChannelDescriptor{ID: 2, MessageType: &p2ptest.Message{}}
+	_, err = router.OpenChannel(chDesc2)
+
 	require.NoError(t, err)
-	require.Contains(t, router.NodeInfo().Channels, chDesc2.ID)
+	require.Contains(t, router.NodeInfo().Channels, byte(chDesc2.ID))
 
 	// Closing the channel, then opening it again should be fine.
 	channel.Close()
 	time.Sleep(100 * time.Millisecond) // yes yes, but Close() is async...
 
-	channel, err = router.OpenChannel(chDesc, &p2ptest.Message{}, 0)
+	channel, err = router.OpenChannel(chDesc)
 	require.NoError(t, err)
 
 	// We should be able to send on the channel, even though there are no peers.
@@ -163,9 +164,9 @@ func TestRouter_Channel_SendReceive(t *testing.T) {
 	ids := network.NodeIDs()
 	aID, bID, cID := ids[0], ids[1], ids[2]
-	channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0)
+	channels := network.MakeChannels(t, chDesc)
 	a, b, c := channels[aID], channels[bID], channels[cID]
-	otherChannels := network.MakeChannels(t, p2ptest.MakeChannelDesc(9), &p2ptest.Message{}, 0)
+	otherChannels := network.MakeChannels(t, p2ptest.MakeChannelDesc(9))
 
 	network.Start(t)
 
@@ -222,7 +223,7 @@ func TestRouter_Channel_Broadcast(t *testing.T) {
 	ids := network.NodeIDs()
 	aID, bID, cID, dID := ids[0], ids[1], ids[2], ids[3]
-	channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0)
+	channels := network.MakeChannels(t, chDesc)
 	a, b, c, d := channels[aID], channels[bID], channels[cID], channels[dID]
 
 	network.Start(t)
@@ -250,7 +251,15 @@ func TestRouter_Channel_Wrapper(t *testing.T) {
 	ids := network.NodeIDs()
 	aID, bID := ids[0], ids[1]
-	channels := network.MakeChannels(t, chDesc, &wrapperMessage{}, 0)
+	chDesc := &p2p.ChannelDescriptor{
+		ID:                  chID,
+		MessageType:         &wrapperMessage{},
+		Priority:            5,
+		SendQueueCapacity:   10,
+		RecvMessageCapacity: 10,
+	}
+
+	channels := network.MakeChannels(t, chDesc)
 	a, b := channels[aID], channels[bID]
 
 	network.Start(t)
@@ -310,7 +319,7 @@ func TestRouter_Channel_Error(t *testing.T) {
 	ids := network.NodeIDs()
 	aID, bID := ids[0], ids[1]
-	channels := network.MakeChannels(t, chDesc, &p2ptest.Message{}, 0)
+	channels := network.MakeChannels(t, chDesc)
 	a := channels[aID]
 
 	// Erroring b should cause it to be disconnected. It will reconnect shortly after.
@@ -367,7 +376,7 @@ func TestRouter_AcceptPeers(t *testing.T) {
 	mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})
 	mockTransport.On("Close").Return(nil)
 	mockTransport.On("Accept").Once().Return(mockConnection, nil)
-	mockTransport.On("Accept").Once().Return(nil, io.EOF)
+	mockTransport.On("Accept").Maybe().Return(nil, io.EOF)
 
 	// Set up and start the router.
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
@@ -755,7 +764,7 @@ func TestRouter_EvictPeers(t *testing.T) {
 	mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})
 	mockTransport.On("Close").Return(nil)
 	mockTransport.On("Accept").Once().Return(mockConnection, nil)
-	mockTransport.On("Accept").Once().Return(nil, io.EOF)
+	mockTransport.On("Accept").Maybe().Return(nil, io.EOF)
 
 	// Set up and start the router.
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
@@ -865,11 +874,12 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
 	mockConnection.On("ReceiveMessage").Return(chID, nil, io.EOF)
 
 	mockTransport := &mocks.Transport{}
+	mockTransport.On("AddChannelDescriptors", mock.Anything).Return()
 	mockTransport.On("String").Maybe().Return("mock")
 	mockTransport.On("Protocols").Return([]p2p.Protocol{"mock"})
 	mockTransport.On("Close").Return(nil)
 	mockTransport.On("Accept").Once().Return(mockConnection, nil)
-	mockTransport.On("Accept").Once().Return(nil, io.EOF)
+	mockTransport.On("Accept").Maybe().Return(nil, io.EOF)
 
 	// Set up and start the router.
 	peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{})
@@ -896,7 +906,7 @@ func TestRouter_DontSendOnInvalidChannel(t *testing.T) {
 		Status: p2p.PeerStatusUp,
 	})
 
-	channel, err := router.OpenChannel(chDesc, &p2ptest.Message{}, 0)
+	channel, err := router.OpenChannel(chDesc)
 	require.NoError(t, err)
 
 	channel.Out <- p2p.Envelope{
diff --git a/internal/p2p/shim.go b/internal/p2p/shim.go
deleted file mode 100644
index 07d1ad156..000000000
--- a/internal/p2p/shim.go
+++ /dev/null
@@ -1,334 +0,0 @@
-package p2p
-
-import (
-	"errors"
-	"sort"
-
-	"github.com/gogo/protobuf/proto"
-	"github.com/tendermint/tendermint/libs/log"
-)
-
-// ============================================================================
-// TODO: Types and business logic below are temporary and will be removed once
-// the legacy p2p stack is removed in favor of the new model.
-//
-// ref: https://github.com/tendermint/tendermint/issues/5670
-// ============================================================================
-
-var _ Reactor = (*ReactorShim)(nil)
-
-type (
-	messageValidator interface {
-		Validate() error
-	}
-
-	// ReactorShim defines a generic shim wrapper around a BaseReactor. It is
-	// responsible for wiring up legacy p2p behavior to the new p2p semantics
-	// (e.g. proxying Envelope messages to legacy peers).
-	ReactorShim struct {
-		BaseReactor
-
-		Name        string
-		PeerUpdates *PeerUpdates
-		Channels    map[ChannelID]*ChannelShim
-	}
-
-	// ChannelShim defines a generic shim wrapper around a legacy p2p channel
-	// and the new p2p Channel. It also includes the raw bi-directional Go channels
-	// so we can proxy message delivery.
-	ChannelShim struct {
-		Descriptor *ChannelDescriptor
-		Channel    *Channel
-		inCh       chan<- Envelope
-		outCh      <-chan Envelope
-		errCh      <-chan PeerError
-	}
-
-	// ChannelDescriptorShim defines a shim wrapper around a legacy p2p channel
-	// and the proto.Message the new p2p Channel is responsible for handling.
-	// A ChannelDescriptorShim is not contained in ReactorShim, but is rather
-	// used to construct a ReactorShim.
-	ChannelDescriptorShim struct {
-		MsgType    proto.Message
-		Descriptor *ChannelDescriptor
-	}
-)
-
-func NewReactorShim(logger log.Logger, name string, descriptors map[ChannelID]*ChannelDescriptorShim) *ReactorShim {
-	channels := make(map[ChannelID]*ChannelShim)
-
-	for _, cds := range descriptors {
-		chShim := NewChannelShim(cds, 0)
-		channels[chShim.Channel.ID] = chShim
-	}
-
-	rs := &ReactorShim{
-		Name:        name,
-		PeerUpdates: NewPeerUpdates(make(chan PeerUpdate), 0),
-		Channels:    channels,
-	}
-
-	rs.BaseReactor = *NewBaseReactor(name, rs)
-	rs.SetLogger(logger)
-
-	return rs
-}
-
-func NewChannelShim(cds *ChannelDescriptorShim, buf uint) *ChannelShim {
-	inCh := make(chan Envelope, buf)
-	outCh := make(chan Envelope, buf)
-	errCh := make(chan PeerError, buf)
-	return &ChannelShim{
-		Descriptor: cds.Descriptor,
-		Channel: NewChannel(
-			ChannelID(cds.Descriptor.ID),
-			cds.MsgType,
-			inCh,
-			outCh,
-			errCh,
-		),
-		inCh:  inCh,
-		outCh: outCh,
-		errCh: errCh,
-	}
-}
-
-// proxyPeerEnvelopes iterates over each p2p Channel and starts a separate
-// go-routine where we listen for outbound envelopes sent during Receive
-// executions (or anything else that may send on the Channel) and proxy them to
-// the corresponding Peer using the To field from the envelope.
-func (rs *ReactorShim) proxyPeerEnvelopes() {
-	for _, cs := range rs.Channels {
-		go func(cs *ChannelShim) {
-			for e := range cs.outCh {
-				msg := proto.Clone(cs.Channel.messageType)
-				msg.Reset()
-
-				wrapper, ok := msg.(Wrapper)
-				if ok {
-					if err := wrapper.Wrap(e.Message); err != nil {
-						rs.Logger.Error(
-							"failed to proxy envelope; failed to wrap message",
-							"ch_id", cs.Descriptor.ID,
-							"err", err,
-						)
-						continue
-					}
-				} else {
-					msg = e.Message
-				}
-
-				bz, err := proto.Marshal(msg)
-				if err != nil {
-					rs.Logger.Error(
-						"failed to proxy envelope; failed to encode message",
-						"ch_id", cs.Descriptor.ID,
-						"err", err,
-					)
-					continue
-				}
-
-				switch {
-				case e.Broadcast:
-					rs.Switch.Broadcast(cs.Descriptor.ID, bz)
-
-				case e.To != "":
-					src := rs.Switch.peers.Get(e.To)
-					if src == nil {
-						rs.Logger.Debug(
-							"failed to proxy envelope; failed to find peer",
-							"ch_id", cs.Descriptor.ID,
-							"peer", e.To,
-						)
-						continue
-					}
-
-					if !src.Send(cs.Descriptor.ID, bz) {
-						// This usually happens when we try to send across a channel
-						// that the peer doesn't have open. To avoid bloating the
-						// logs we set this to be Debug
-						rs.Logger.Debug(
-							"failed to proxy message to peer",
-							"ch_id", cs.Descriptor.ID,
-							"peer", e.To,
-						)
-					}
-
-				default:
-					rs.Logger.Error("failed to proxy envelope; missing peer ID", "ch_id", cs.Descriptor.ID)
-				}
-			}
-		}(cs)
-	}
-}
-
-// handlePeerErrors iterates over each p2p Channel and starts a separate go-routine
-// where we listen for peer errors. For each peer error, we find the peer from
-// the legacy p2p Switch and execute a StopPeerForError call with the corresponding
-// peer error.
-func (rs *ReactorShim) handlePeerErrors() {
-	for _, cs := range rs.Channels {
-		go func(cs *ChannelShim) {
-			for pErr := range cs.errCh {
-				if pErr.NodeID != "" {
-					peer := rs.Switch.peers.Get(pErr.NodeID)
-					if peer == nil {
-						rs.Logger.Error("failed to handle peer error; failed to find peer", "peer", pErr.NodeID)
-						continue
-					}
-
-					rs.Switch.StopPeerForError(peer, pErr.Err)
-				}
-			}
-		}(cs)
-	}
-}
-
-// OnStart executes the reactor shim's OnStart hook where we start all the
-// necessary go-routines in order to proxy peer envelopes and errors per p2p
-// Channel.
-func (rs *ReactorShim) OnStart() error {
-	if rs.Switch == nil {
-		return errors.New("proxyPeerEnvelopes: reactor shim switch is nil")
-	}
-
-	// start envelope proxying and peer error handling in separate go routines
-	rs.proxyPeerEnvelopes()
-	rs.handlePeerErrors()
-
-	return nil
-}
-
-// GetChannel returns a p2p Channel reference for a given ChannelID. If no
-// Channel exists, nil is returned.
-func (rs *ReactorShim) GetChannel(cID ChannelID) *Channel {
-	channelShim, ok := rs.Channels[cID]
-	if ok {
-		return channelShim.Channel
-	}
-
-	return nil
-}
-
-// GetChannels implements the legacy Reactor interface for getting a slice of all
-// the supported ChannelDescriptors.
-func (rs *ReactorShim) GetChannels() []*ChannelDescriptor {
-	sortedChIDs := make([]ChannelID, 0, len(rs.Channels))
-	for cID := range rs.Channels {
-		sortedChIDs = append(sortedChIDs, cID)
-	}
-
-	sort.Slice(sortedChIDs, func(i, j int) bool { return sortedChIDs[i] < sortedChIDs[j] })
-
-	descriptors := make([]*ChannelDescriptor, len(rs.Channels))
-	for i, cID := range sortedChIDs {
-		descriptors[i] = rs.Channels[cID].Descriptor
-	}
-
-	return descriptors
-}
-
-// AddPeer sends a PeerUpdate with status PeerStatusUp on the PeerUpdateCh.
-// The embedding reactor must be sure to listen for messages on this channel to
-// handle adding a peer.
-func (rs *ReactorShim) AddPeer(peer Peer) {
-	select {
-	case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusUp}:
-		rs.Logger.Debug("sent peer update", "reactor", rs.Name, "peer", peer.ID(), "status", PeerStatusUp)
-
-	case <-rs.PeerUpdates.Done():
-		// NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel.
-		// This is because there may be numerous spawned goroutines that are
-		// attempting to send on the updateCh go channel and when the reactor stops
-		// we do not want to preemptively close the channel as that could result in
-		// panics sending on a closed channel. This also means that reactors MUST
-		// be certain there are NO listeners on the updateCh channel when closing or
-		// stopping.
-	}
-}
-
-// RemovePeer sends a PeerUpdate with status PeerStatusDown on the PeerUpdateCh.
-// The embedding reactor must be sure to listen for messages on this channel to
-// handle removing a peer.
-func (rs *ReactorShim) RemovePeer(peer Peer, reason interface{}) {
-	select {
-	case rs.PeerUpdates.reactorUpdatesCh <- PeerUpdate{NodeID: peer.ID(), Status: PeerStatusDown}:
-		rs.Logger.Debug(
-			"sent peer update",
-			"reactor", rs.Name,
-			"peer", peer.ID(),
-			"reason", reason,
-			"status", PeerStatusDown,
-		)
-
-	case <-rs.PeerUpdates.Done():
-		// NOTE: We explicitly DO NOT close the PeerUpdatesCh's updateCh go channel.
-		// This is because there may be numerous spawned goroutines that are
-		// attempting to send on the updateCh go channel and when the reactor stops
-		// we do not want to preemptively close the channel as that could result in
-		// panics sending on a closed channel. This also means that reactors MUST
-		// be certain there are NO listeners on the updateCh channel when closing or
-		// stopping.
-	}
-}
-
-// Receive implements a generic wrapper around implementing the Receive method
-// on the legacy Reactor p2p interface. If the reactor is running, Receive will
-// find the corresponding new p2p Channel, create and decode the appropriate
-// proto.Message from the msgBytes, execute any validation and finally construct
-// and send a p2p Envelope on the appropriate p2p Channel.
-func (rs *ReactorShim) Receive(chID byte, src Peer, msgBytes []byte) {
-	if !rs.IsRunning() {
-		return
-	}
-
-	cID := ChannelID(chID)
-	channelShim, ok := rs.Channels[cID]
-	if !ok {
-		rs.Logger.Error("unexpected channel", "peer", src, "ch_id", chID)
-		return
-	}
-
-	msg := proto.Clone(channelShim.Channel.messageType)
-	msg.Reset()
-
-	if err := proto.Unmarshal(msgBytes, msg); err != nil {
-		rs.Logger.Error("error decoding message", "peer", src, "ch_id", cID, "err", err)
-		rs.Switch.StopPeerForError(src, err)
-		return
-	}
-
-	validator, ok := msg.(messageValidator)
-	if ok {
-		if err := validator.Validate(); err != nil {
-			rs.Logger.Error("invalid message", "peer", src, "ch_id", cID, "err", err)
-			rs.Switch.StopPeerForError(src, err)
-			return
-		}
-	}
-
-	wrapper, ok := msg.(Wrapper)
-	if ok {
-		var err error
-
-		msg, err = wrapper.Unwrap()
-		if err != nil {
-			rs.Logger.Error("failed to unwrap message", "peer", src, "ch_id", chID, "err", err)
-			return
-		}
-	}
-
-	select {
-	case channelShim.inCh <- Envelope{From: src.ID(), Message: msg}:
-		rs.Logger.Debug("proxied envelope", "reactor", rs.Name, "ch_id", cID, "peer", src.ID())
-
-	case <-channelShim.Channel.Done():
-		// NOTE: We explicitly DO NOT close the p2p Channel's inbound go channel.
-		// This is because there may be numerous spawned goroutines that are
-		// attempting to send on the inbound channel and when the reactor stops we
-		// do not want to preemptively close the channel as that could result in
-		// panics sending on a closed channel. This also means that reactors MUST
-		// be certain there are NO listeners on the inbound channel when closing or
-		// stopping.
-	}
-}
diff --git a/internal/p2p/shim_test.go b/internal/p2p/shim_test.go
deleted file mode 100644
index d8b9e30c3..000000000
--- a/internal/p2p/shim_test.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package p2p_test
-
-import (
-	"sync"
-	"testing"
-
-	"github.com/gogo/protobuf/proto"
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/require"
-	"github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/internal/p2p"
-	p2pmocks "github.com/tendermint/tendermint/internal/p2p/mocks"
-	"github.com/tendermint/tendermint/libs/log"
-	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
-	"github.com/tendermint/tendermint/types"
-)
-
-var (
-	channelID1 = byte(0x01)
-	channelID2 = byte(0x02)
-
-	p2pCfg = config.DefaultP2PConfig()
-
-	testChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{
-		p2p.ChannelID(channelID1): {
-			MsgType: new(ssproto.Message),
-			Descriptor: &p2p.ChannelDescriptor{
-				ID:                  channelID1,
-				Priority:            3,
-				SendQueueCapacity:   10,
-				RecvMessageCapacity: int(4e6),
-			},
-		},
-		p2p.ChannelID(channelID2): {
-			MsgType: new(ssproto.Message),
-			Descriptor: &p2p.ChannelDescriptor{
-				ID:                  channelID2,
-				Priority:            1,
-				SendQueueCapacity:   4,
-				RecvMessageCapacity: int(16e6),
-			},
-		},
-	}
-)
-
-type reactorShimTestSuite struct {
-	shim *p2p.ReactorShim
-	sw   *p2p.Switch
-}
-
-func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite {
-	t.Helper()
-
-	rts := &reactorShimTestSuite{
-		shim: p2p.NewReactorShim(log.TestingLogger(), "TestShim", testChannelShims),
-	}
-
-	rts.sw = p2p.MakeSwitch(p2pCfg, 1, "testing", "123.123.123", func(_ int, sw *p2p.Switch) *p2p.Switch {
-		for _, peer := range peers {
-			p2p.AddPeerToSwitchPeerSet(sw, peer)
-		}
-
-		sw.AddReactor(rts.shim.Name, rts.shim)
-		return sw
-	}, log.TestingLogger())
-
-	// start the reactor shim
-	require.NoError(t, rts.shim.Start())
-
-	t.Cleanup(func() {
-		require.NoError(t, rts.shim.Stop())
-
-		for _, chs := range rts.shim.Channels {
-			chs.Channel.Close()
-		}
-	})
-
-	return rts
-}
-
-func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, types.NodeID) {
-	t.Helper()
-
-	peerID := types.NodeID(id)
-	peer := &p2pmocks.Peer{}
-	peer.On("ID").Return(peerID)
-
-	return peer, peerID
-}
-
-func TestReactorShim_GetChannel(t *testing.T) {
-	rts := setup(t, nil)
-
-	p2pCh := rts.shim.GetChannel(p2p.ChannelID(channelID1))
-	require.NotNil(t, p2pCh)
-	require.Equal(t, p2pCh.ID, p2p.ChannelID(channelID1))
-
-	p2pCh = rts.shim.GetChannel(p2p.ChannelID(byte(0x03)))
-	require.Nil(t, p2pCh)
-}
-
-func TestReactorShim_GetChannels(t *testing.T) {
-	rts := setup(t, nil)
-
-	p2pChs := rts.shim.GetChannels()
-	require.Len(t, p2pChs, 2)
-	require.Equal(t, p2p.ChannelID(p2pChs[0].ID), p2p.ChannelID(channelID1))
-	require.Equal(t, p2p.ChannelID(p2pChs[1].ID), p2p.ChannelID(channelID2))
-}
-
-func TestReactorShim_AddPeer(t *testing.T) {
-	peerA, peerIDA := simplePeer(t, "aa")
-	rts := setup(t, []p2p.Peer{peerA})
-
-	var wg sync.WaitGroup
-	wg.Add(1)
-
-	var peerUpdate p2p.PeerUpdate
-	go func() {
-		peerUpdate = <-rts.shim.PeerUpdates.Updates()
-		wg.Done()
-	}()
-
-	rts.shim.AddPeer(peerA)
-	wg.Wait()
-
-	require.Equal(t, peerIDA, peerUpdate.NodeID)
-	require.Equal(t, p2p.PeerStatusUp, peerUpdate.Status)
-}
-
-func TestReactorShim_RemovePeer(t *testing.T) {
-	peerA, peerIDA := simplePeer(t, "aa")
-	rts := setup(t, []p2p.Peer{peerA})
-
-	var wg sync.WaitGroup
-	wg.Add(1)
-
-	var peerUpdate p2p.PeerUpdate
-	go func() {
-		peerUpdate = <-rts.shim.PeerUpdates.Updates()
-		wg.Done()
-	}()
-
-	rts.shim.RemovePeer(peerA, "test reason")
-	wg.Wait()
-
-	require.Equal(t, peerIDA, peerUpdate.NodeID)
-	require.Equal(t, p2p.PeerStatusDown, peerUpdate.Status)
-}
-
-func TestReactorShim_Receive(t *testing.T) {
-	peerA, peerIDA := simplePeer(t, "aa")
-	rts := setup(t, []p2p.Peer{peerA})
-
-	msg := &ssproto.Message{
-		Sum: &ssproto.Message_ChunkRequest{
-			ChunkRequest: &ssproto.ChunkRequest{Height: 1, Format: 1, Index: 1},
-		},
-	}
-
-	bz, err := proto.Marshal(msg)
-	require.NoError(t, err)
-
-	var wg sync.WaitGroup
-
-	var response *ssproto.Message
-	peerA.On("Send", channelID1, mock.Anything).Run(func(args mock.Arguments) {
-		m := &ssproto.Message{}
-		require.NoError(t, proto.Unmarshal(args[1].([]byte), m))
-
-		response = m
-		wg.Done()
-	}).Return(true)
-
-	p2pCh := rts.shim.Channels[p2p.ChannelID(channelID1)]
-
-	wg.Add(2)
-
-	// Simulate receiving the envelope in some real reactor and replying back with
-	// the same envelope and then closing the Channel.
-	go func() {
-		e := <-p2pCh.Channel.In
-		require.Equal(t, peerIDA, e.From)
-		require.NotNil(t, e.Message)
-
-		p2pCh.Channel.Out <- p2p.Envelope{To: e.From, Message: e.Message}
-		p2pCh.Channel.Close()
-		wg.Done()
-	}()
-
-	rts.shim.Receive(channelID1, peerA, bz)
-
-	// wait until the mock peer called Send and we (fake) proxied the envelope
-	wg.Wait()
-	require.NotNil(t, response)
-
-	m, err := response.Unwrap()
-	require.NoError(t, err)
-	require.Equal(t, msg.GetChunkRequest(), m)
-
-	// Since p2pCh was closed in the simulated reactor above, calling Receive
-	// should not block.
-	rts.shim.Receive(channelID1, peerA, bz)
-	require.Empty(t, p2pCh.Channel.In)
-
-	peerA.AssertExpectations(t)
-}
diff --git a/internal/p2p/switch.go b/internal/p2p/switch.go
deleted file mode 100644
index eeb93a994..000000000
--- a/internal/p2p/switch.go
+++ /dev/null
@@ -1,1065 +0,0 @@
-package p2p
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"math"
-	mrand "math/rand"
-	"net"
-	"sync"
-	"time"
-
-	"github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/crypto"
-	"github.com/tendermint/tendermint/internal/p2p/conn"
-	"github.com/tendermint/tendermint/libs/cmap"
-	tmrand "github.com/tendermint/tendermint/libs/rand"
-	"github.com/tendermint/tendermint/libs/service"
-	"github.com/tendermint/tendermint/types"
-)
-
-const (
-	// wait a random amount of time from this interval
-	// before dialing peers or reconnecting to help prevent DoS
-	dialRandomizerIntervalMilliseconds = 3000
-
-	// repeatedly try to reconnect for a few minutes
-	// ie. 5 * 20 = 100s
-	reconnectAttempts = 20
-	reconnectInterval = 5 * time.Second
-
-	// then move into exponential backoff mode for ~1day
-	// ie. 3**10 = 16hrs
-	reconnectBackOffAttempts    = 10
-	reconnectBackOffBaseSeconds = 3
-
-	defaultFilterTimeout = 5 * time.Second
-)
-
-// MConnConfig returns an MConnConfig with fields updated
-// from the P2PConfig.
-func MConnConfig(cfg *config.P2PConfig) conn.MConnConfig {
-	mConfig := conn.DefaultMConnConfig()
-	mConfig.FlushThrottle = cfg.FlushThrottleTimeout
-	mConfig.SendRate = cfg.SendRate
-	mConfig.RecvRate = cfg.RecvRate
-	mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize
-	return mConfig
-}
-
-//-----------------------------------------------------------------------------
-
-// An AddrBook represents an address book from the pex package, which is used
-// to store peer addresses.
-type AddrBook interface {
-	AddAddress(addr *NetAddress, src *NetAddress) error
-	AddPrivateIDs([]string)
-	AddOurAddress(*NetAddress)
-	OurAddress(*NetAddress) bool
-	MarkGood(types.NodeID)
-	RemoveAddress(*NetAddress)
-	HasAddress(*NetAddress) bool
-	Save()
-}
-
-// ConnFilterFunc is a callback for connection filtering. If it returns an
-// error, the connection is rejected. The set of existing connections is passed
-// along with the new connection and all resolved IPs.
-type ConnFilterFunc func(ConnSet, net.Conn, []net.IP) error
-
-// PeerFilterFunc to be implemented by filter hooks after a new Peer has been
-// fully setup.
-type PeerFilterFunc func(IPeerSet, Peer) error
-
-// ConnDuplicateIPFilter resolves and keeps all ips for an incoming connection
-// and refuses new ones if they come from a known ip.
-var ConnDuplicateIPFilter ConnFilterFunc = func(cs ConnSet, c net.Conn, ips []net.IP) error {
-	for _, ip := range ips {
-		if cs.HasIP(ip) {
-			return ErrRejected{
-				conn:        c,
-				err:         fmt.Errorf("ip<%v> already connected", ip),
-				isDuplicate: true,
-			}
-		}
-	}
-	return nil
-}
-
-//-----------------------------------------------------------------------------
-
-// Switch handles peer connections and exposes an API to receive incoming messages
-// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
-// or more `Channels`. So while sending outgoing messages is typically performed on the peer,
-// incoming messages are received on the reactor.
-type Switch struct {
-	service.BaseService
-
-	config       *config.P2PConfig
-	reactors     map[string]Reactor
-	chDescs      []*conn.ChannelDescriptor
-	reactorsByCh map[byte]Reactor
-	peers        *PeerSet
-	dialing      *cmap.CMap
-	reconnecting *cmap.CMap
-	nodeInfo     types.NodeInfo // our node info
-	nodeKey      types.NodeKey  // our node privkey
-	addrBook     AddrBook
-	// peers addresses with whom we'll maintain constant connection
-	persistentPeersAddrs []*NetAddress
-	unconditionalPeerIDs map[types.NodeID]struct{}
-
-	transport Transport
-
-	filterTimeout time.Duration
-	peerFilters   []PeerFilterFunc
-	connFilters   []ConnFilterFunc
-	conns         ConnSet
-
-	metrics *Metrics
-}
-
-// NetAddress returns the first address the switch is listening on,
-// or nil if no addresses are found.
-func (sw *Switch) NetAddress() *NetAddress {
-	endpoints := sw.transport.Endpoints()
-	if len(endpoints) == 0 {
-		return nil
-	}
-	return &NetAddress{
-		ID:   sw.nodeInfo.NodeID,
-		IP:   endpoints[0].IP,
-		Port: endpoints[0].Port,
-	}
-}
-
-// SwitchOption sets an optional parameter on the Switch.
-type SwitchOption func(*Switch)
-
-// NewSwitch creates a new Switch with the given config.
-func NewSwitch(
-	cfg *config.P2PConfig,
-	transport Transport,
-	options ...SwitchOption,
-) *Switch {
-	sw := &Switch{
-		config:               cfg,
-		reactors:             make(map[string]Reactor),
-		chDescs:              make([]*conn.ChannelDescriptor, 0),
-		reactorsByCh:         make(map[byte]Reactor),
-		peers:                NewPeerSet(),
-		dialing:              cmap.NewCMap(),
-		reconnecting:         cmap.NewCMap(),
-		metrics:              NopMetrics(),
-		transport:            transport,
-		persistentPeersAddrs: make([]*NetAddress, 0),
-		unconditionalPeerIDs: make(map[types.NodeID]struct{}),
-		filterTimeout:        defaultFilterTimeout,
-		conns:                NewConnSet(),
-	}
-
-	// Ensure PRNG is reseeded.
-	tmrand.Reseed()
-
-	sw.BaseService = *service.NewBaseService(nil, "P2P Switch", sw)
-
-	for _, option := range options {
-		option(sw)
-	}
-
-	return sw
-}
-
-// SwitchFilterTimeout sets the timeout used for peer filters.
-func SwitchFilterTimeout(timeout time.Duration) SwitchOption {
-	return func(sw *Switch) { sw.filterTimeout = timeout }
-}
-
-// SwitchPeerFilters sets the filters for rejection of new peers.
-func SwitchPeerFilters(filters ...PeerFilterFunc) SwitchOption {
-	return func(sw *Switch) { sw.peerFilters = filters }
-}
-
-// SwitchConnFilters sets the filters for rejection of connections.
-func SwitchConnFilters(filters ...ConnFilterFunc) SwitchOption {
-	return func(sw *Switch) { sw.connFilters = filters }
-}
-
-// WithMetrics sets the metrics.
-func WithMetrics(metrics *Metrics) SwitchOption {
-	return func(sw *Switch) { sw.metrics = metrics }
-}
-
-//---------------------------------------------------------------------
-// Switch setup
-
-// AddReactor adds the given reactor to the switch.
-// NOTE: Not goroutine safe.
-func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor {
-	for _, chDesc := range reactor.GetChannels() {
-		chID := chDesc.ID
-		// No two reactors can share the same channel.
-		if sw.reactorsByCh[chID] != nil {
-			panic(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor))
-		}
-		sw.chDescs = append(sw.chDescs, chDesc)
-		sw.reactorsByCh[chID] = reactor
-	}
-	sw.reactors[name] = reactor
-	reactor.SetSwitch(sw)
-	return reactor
-}
-
-// RemoveReactor removes the given Reactor from the Switch.
-// NOTE: Not goroutine safe.
-func (sw *Switch) RemoveReactor(name string, reactor Reactor) {
-	for _, chDesc := range reactor.GetChannels() {
-		// remove channel description
-		for i := 0; i < len(sw.chDescs); i++ {
-			if chDesc.ID == sw.chDescs[i].ID {
-				sw.chDescs = append(sw.chDescs[:i], sw.chDescs[i+1:]...)
-				break
-			}
-		}
-		delete(sw.reactorsByCh, chDesc.ID)
-	}
-	delete(sw.reactors, name)
-	reactor.SetSwitch(nil)
-}
-
-// Reactors returns a map of reactors registered on the switch.
-// NOTE: Not goroutine safe.
-func (sw *Switch) Reactors() map[string]Reactor {
-	return sw.reactors
-}
-
-// Reactor returns the reactor with the given name.
-// NOTE: Not goroutine safe.
-func (sw *Switch) Reactor(name string) Reactor {
-	return sw.reactors[name]
-}
-
-// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes.
-// NOTE: Not goroutine safe.
-func (sw *Switch) SetNodeInfo(nodeInfo types.NodeInfo) {
-	sw.nodeInfo = nodeInfo
-}
-
-// NodeInfo returns the switch's NodeInfo.
-// NOTE: Not goroutine safe.
-func (sw *Switch) NodeInfo() types.NodeInfo {
-	return sw.nodeInfo
-}
-
-// SetNodeKey sets the switch's private key for authenticated encryption.
-// NOTE: Not goroutine safe.
-func (sw *Switch) SetNodeKey(nodeKey types.NodeKey) {
-	sw.nodeKey = nodeKey
-}
-
-//---------------------------------------------------------------------
-// Service start/stop
-
-// OnStart implements BaseService. It starts all the reactors and peers.
-func (sw *Switch) OnStart() error {
-
-	// FIXME: Temporary hack to pass channel descriptors to MConn transport,
-	// since they are not available when it is constructed. This will be
-	// fixed when we implement the new router abstraction.
-	if t, ok := sw.transport.(*MConnTransport); ok {
-		t.channelDescs = sw.chDescs
-	}
-
-	// Start reactors
-	for _, reactor := range sw.reactors {
-		err := reactor.Start()
-		if err != nil {
-			return fmt.Errorf("failed to start %v: %w", reactor, err)
-		}
-	}
-
-	// Start accepting Peers.
-	go sw.acceptRoutine()
-
-	return nil
-}
-
-// OnStop implements BaseService. It stops all peers and reactors.
-func (sw *Switch) OnStop() {
-	// Stop peers
-	for _, p := range sw.peers.List() {
-		sw.stopAndRemovePeer(p, nil)
-	}
-
-	// Stop reactors
-	sw.Logger.Debug("Switch: Stopping reactors")
-	for _, reactor := range sw.reactors {
-		if err := reactor.Stop(); err != nil {
-			sw.Logger.Error("error while stopping reactor", "reactor", reactor, "error", err)
-		}
-	}
-}
-
-//---------------------------------------------------------------------
-// Peers
-
-// Broadcast runs a go routine for each attempted send, which will block trying
-// to send for defaultSendTimeoutSeconds. Returns a channel which receives
-// success values for each attempted send (false if times out). Channel will be
-// closed once msg bytes are sent to all peers (or time out).
-//
-// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved.
-func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool {
-	sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", msgBytes)
-
-	peers := sw.peers.List()
-	var wg sync.WaitGroup
-	wg.Add(len(peers))
-	successChan := make(chan bool, len(peers))
-
-	for _, peer := range peers {
-		go func(p Peer) {
-			defer wg.Done()
-			success := p.Send(chID, msgBytes)
-			successChan <- success
-		}(peer)
-	}
-
-	go func() {
-		wg.Wait()
-		close(successChan)
-	}()
-
-	return successChan
-}
-
-// NumPeers returns the count of outbound/inbound and outbound-dialing peers.
-// unconditional peers are not counted here.
-func (sw *Switch) NumPeers() (outbound, inbound, dialing int) {
-	peers := sw.peers.List()
-	for _, peer := range peers {
-		if peer.IsOutbound() {
-			if !sw.IsPeerUnconditional(peer.ID()) {
-				outbound++
-			}
-		} else {
-			if !sw.IsPeerUnconditional(peer.ID()) {
-				inbound++
-			}
-		}
-	}
-	dialing = sw.dialing.Size()
-	return
-}
-
-func (sw *Switch) IsPeerUnconditional(id types.NodeID) bool {
-	_, ok := sw.unconditionalPeerIDs[id]
-	return ok
-}
-
-// MaxNumOutboundPeers returns a maximum number of outbound peers.
-func (sw *Switch) MaxNumOutboundPeers() int {
-	return sw.config.MaxNumOutboundPeers
-}
-
-// Peers returns the set of peers that are connected to the switch.
-func (sw *Switch) Peers() IPeerSet {
-	return sw.peers
-}
-
-// StopPeerForError disconnects from a peer due to external error.
-// If the peer is persistent, it will attempt to reconnect.
-// TODO: make record depending on reason.
-func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { - if !peer.IsRunning() { - return - } - - sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) - sw.stopAndRemovePeer(peer, reason) - - if peer.IsPersistent() { - var addr *NetAddress - if peer.IsOutbound() { // socket address for outbound peers - addr = peer.SocketAddr() - } else { // self-reported address for inbound peers - var err error - addr, err = peer.NodeInfo().NetAddress() - if err != nil { - sw.Logger.Error("Wanted to reconnect to inbound peer, but self-reported address is wrong", - "peer", peer, "err", err) - return - } - } - go sw.reconnectToPeer(addr) - } -} - -// StopPeerGracefully disconnects from a peer gracefully. -// TODO: handle graceful disconnects. -func (sw *Switch) StopPeerGracefully(peer Peer) { - sw.Logger.Info("Stopping peer gracefully") - sw.stopAndRemovePeer(peer, nil) -} - -func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - if err := peer.Stop(); err != nil { - sw.Logger.Error("error while stopping peer", "error", err) // TODO: should return error to be handled accordingly - } - - for _, reactor := range sw.reactors { - reactor.RemovePeer(peer, reason) - } - - // Removing a peer should go last to avoid a situation where a peer - // reconnect to our node and the switch calls InitPeer before - // RemovePeer is finished. - // https://github.com/tendermint/tendermint/issues/3338 - if sw.peers.Remove(peer) { - sw.metrics.Peers.Add(float64(-1)) - } - - sw.conns.RemoveAddr(peer.RemoteAddr()) -} - -// reconnectToPeer tries to reconnect to the addr, first repeatedly -// with a fixed interval, then with exponential backoff. -// If no success after all that, it stops trying, and leaves it -// to the PEX/Addrbook to find the peer with the addr again -// NOTE: this will keep trying even if the handshake or auth fails. -// TODO: be more explicit with error types so we only retry on certain failures -// - ie. if we're getting ErrDuplicatePeer we can stop -// because the addrbook got us the peer back already -func (sw *Switch) reconnectToPeer(addr *NetAddress) { - if sw.reconnecting.Has(string(addr.ID)) { - return - } - sw.reconnecting.Set(string(addr.ID), addr) - defer sw.reconnecting.Delete(string(addr.ID)) - - start := time.Now() - sw.Logger.Info("Reconnecting to peer", "addr", addr) - for i := 0; i < reconnectAttempts; i++ { - if !sw.IsRunning() { - return - } - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - // sleep a set amount - sw.randomSleep(reconnectInterval) - continue - } - - sw.Logger.Error("Failed to reconnect to peer. Beginning exponential backoff", - "addr", addr, "elapsed", time.Since(start)) - for i := 0; i < reconnectBackOffAttempts; i++ { - if !sw.IsRunning() { - return - } - - // sleep an exponentially increasing amount - sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) - sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) - - err := sw.DialPeerWithAddress(addr) - if err == nil { - return // success - } else if _, ok := err.(ErrCurrentlyDialingOrExistingAddress); ok { - return - } - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - } - sw.Logger.Error("Failed to reconnect to peer. 
Giving up", "addr", addr, "elapsed", time.Since(start)) -} - -// SetAddrBook allows to set address book on Switch. -func (sw *Switch) SetAddrBook(addrBook AddrBook) { - sw.addrBook = addrBook -} - -// MarkPeerAsGood marks the given peer as good when it did something useful -// like contributed to consensus. -func (sw *Switch) MarkPeerAsGood(peer Peer) { - if sw.addrBook != nil { - sw.addrBook.MarkGood(peer.ID()) - } -} - -//--------------------------------------------------------------------- -// Dialing - -type privateAddr interface { - PrivateAddr() bool -} - -func isPrivateAddr(err error) bool { - te, ok := err.(privateAddr) - return ok && te.PrivateAddr() -} - -// DialPeersAsync dials a list of peers asynchronously in random order. -// Used to dial peers from config on startup or from unsafe-RPC (trusted sources). -// It ignores ErrNetAddressLookup. However, if there are other errors, first -// encounter is returned. -// Nop if there are no peers. -func (sw *Switch) DialPeersAsync(peers []string) error { - netAddrs, errs := NewNetAddressStrings(peers) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.dialPeersAsync(netAddrs) - return nil -} - -func (sw *Switch) dialPeersAsync(netAddrs []*NetAddress) { - ourAddr := sw.NetAddress() - - // TODO: this code feels like it's in the wrong place. - // The integration tests depend on the addrBook being saved - // right away but maybe we can change that. Recall that - // the addrBook is only written to disk every 2min - if sw.addrBook != nil { - // add peers to `addrBook` - for _, netAddr := range netAddrs { - // do not add our address or ID - if !netAddr.Same(ourAddr) { - if err := sw.addrBook.AddAddress(netAddr, ourAddr); err != nil { - if isPrivateAddr(err) { - sw.Logger.Debug("Won't add peer's address to addrbook", "err", err) - } else { - sw.Logger.Error("Can't add peer's address to addrbook", "err", err) - } - } - } - } - // Persist some peers to disk right away. - // NOTE: integration tests depend on this - sw.addrBook.Save() - } - - // permute the list, dial them in random order. - perm := mrand.Perm(len(netAddrs)) - for i := 0; i < len(perm); i++ { - go func(i int) { - j := perm[i] - addr := netAddrs[j] - - if addr.Same(ourAddr) { - sw.Logger.Debug("Ignore attempt to connect to ourselves", "addr", addr, "ourAddr", ourAddr) - return - } - - sw.randomSleep(0) - - err := sw.DialPeerWithAddress(addr) - if err != nil { - switch err.(type) { - case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID, ErrCurrentlyDialingOrExistingAddress: - sw.Logger.Debug("Error dialing peer", "err", err) - default: - sw.Logger.Error("Error dialing peer", "err", err) - } - } - }(i) - } -} - -// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects -// and authenticates successfully. -// If we're currently dialing this address or it belongs to an existing peer, -// ErrCurrentlyDialingOrExistingAddress is returned. 
-func (sw *Switch) DialPeerWithAddress(addr *NetAddress) error { - if sw.IsDialingOrExistingAddress(addr) { - return ErrCurrentlyDialingOrExistingAddress{addr.String()} - } - - sw.dialing.Set(string(addr.ID), addr) - defer sw.dialing.Delete(string(addr.ID)) - - return sw.addOutboundPeerWithConfig(addr, sw.config) -} - -// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] -func (sw *Switch) randomSleep(interval time.Duration) { - // nolint:gosec // G404: Use of weak random number generator - r := time.Duration(mrand.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond - time.Sleep(r + interval) -} - -// IsDialingOrExistingAddress returns true if switch has a peer with the given -// address or dialing it at the moment. -func (sw *Switch) IsDialingOrExistingAddress(addr *NetAddress) bool { - return sw.dialing.Has(string(addr.ID)) || - sw.peers.Has(addr.ID) || - (!sw.config.AllowDuplicateIP && sw.peers.HasIP(addr.IP)) -} - -// AddPersistentPeers allows you to set persistent peers. It ignores -// ErrNetAddressLookup. However, if there are other errors, first encounter is -// returned. -func (sw *Switch) AddPersistentPeers(addrs []string) error { - sw.Logger.Info("Adding persistent peers", "addrs", addrs) - netAddrs, errs := NewNetAddressStrings(addrs) - // report all the errors - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - // return first non-ErrNetAddressLookup error - for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { - continue - } - return err - } - sw.persistentPeersAddrs = netAddrs - return nil -} - -func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { - sw.Logger.Info("Adding unconditional peer ids", "ids", ids) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - sw.unconditionalPeerIDs[types.NodeID(id)] = struct{}{} - } - return nil -} - -func (sw *Switch) AddPrivatePeerIDs(ids []string) error { - validIDs := make([]string, 0, len(ids)) - for i, id := range ids { - err := types.NodeID(id).Validate() - if err != nil { - return fmt.Errorf("wrong ID #%d: %w", i, err) - } - validIDs = append(validIDs, id) - } - - sw.addrBook.AddPrivateIDs(validIDs) - - return nil -} - -func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { - for _, pa := range sw.persistentPeersAddrs { - if pa.Equals(na) { - return true - } - } - return false -} - -func (sw *Switch) acceptRoutine() { - for { - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Accept() - if err == nil { - // NOTE: The legacy MConn transport did handshaking in Accept(), - // which was asynchronous and avoided head-of-line-blocking. - // However, as handshakes are being migrated out from the transport, - // we just do it synchronously here for now. - peerNodeInfo, _, err = sw.handshakePeer(c, "") - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if err == io.EOF { - err = ErrTransportClosed{} - } - switch err := err.(type) { - case ErrRejected: - addr := err.Addr() - if err.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. 
- sw.addrBook.RemoveAddress(&addr) - sw.addrBook.AddOurAddress(&addr) - } - if err.IsIncompatible() { - sw.addrBook.RemoveAddress(&addr) - } - - sw.Logger.Info( - "Inbound Peer rejected", - "err", err, - "numPeers", sw.peers.Size(), - ) - - continue - case ErrFilterTimeout: - sw.Logger.Error( - "Peer filter timed out", - "err", err, - ) - - continue - case ErrTransportClosed: - sw.Logger.Error( - "Stopped accept routine, as transport is closed", - "numPeers", sw.peers.Size(), - ) - default: - sw.Logger.Error( - "Accept on transport errored", - "err", err, - "numPeers", sw.peers.Size(), - ) - // We could instead have a retry loop around the acceptRoutine, - // but that would need to stop and let the node shutdown eventually. - // So might as well panic and let process managers restart the node. - // There's no point in letting the node run without the acceptRoutine, - // since it won't be able to accept new connections. - panic(fmt.Errorf("accept routine exited: %v", err)) - } - - break - } - - isPersistent := false - addr, err := peerNodeInfo.NetAddress() - if err == nil { - isPersistent = sw.IsPeerPersistent(addr) - } - - p := newPeer( - peerNodeInfo, - newPeerConn(false, isPersistent, c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if !sw.IsPeerUnconditional(p.NodeInfo().ID()) { - // Ignore connection if we already have enough peers. - _, in, _ := sw.NumPeers() - if in >= sw.config.MaxNumInboundPeers { - sw.Logger.Info( - "Ignoring inbound connection: already have enough inbound peers", - "address", p.SocketAddr(), - "have", in, - "max", sw.config.MaxNumInboundPeers, - ) - _ = p.CloseConn() - continue - } - - } - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - sw.Logger.Info( - "Ignoring inbound connection: error while adding peer", - "err", err, - "id", p.ID(), - ) - } - } -} - -// dial the peer; make secret connection; authenticate against the dialed ID; -// add the peer. -// if dialing fails, start the reconnect loop. If handshake fails, it's over. -// If peer is started successfully, reconnectLoop will start when -// StopPeerForError is called. -func (sw *Switch) addOutboundPeerWithConfig( - addr *NetAddress, - cfg *config.P2PConfig, -) error { - sw.Logger.Info("Dialing peer", "address", addr) - - // XXX(xla): Remove the leakage of test concerns in implementation. - if cfg.TestDialFail { - go sw.reconnectToPeer(addr) - return fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - // Hardcoded timeout moved from MConn transport during refactoring. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - var peerNodeInfo types.NodeInfo - c, err := sw.transport.Dial(ctx, Endpoint{ - Protocol: MConnProtocol, - IP: addr.IP, - Port: addr.Port, - }) - if err == nil { - peerNodeInfo, _, err = sw.handshakePeer(c, addr.ID) - } - if err == nil { - err = sw.filterConn(c.(*mConnConnection).conn) - } - if err != nil { - if c != nil { - _ = c.Close() - } - if e, ok := err.(ErrRejected); ok { - if e.IsSelf() { - // Remove the given address from the address book and add to our addresses - // to avoid dialing in the future. 
- sw.addrBook.RemoveAddress(addr) - sw.addrBook.AddOurAddress(addr) - } - if e.IsIncompatible() { - sw.addrBook.RemoveAddress(addr) - } - - return err - } - - // retry persistent peers after - // any dial error besides IsSelf() - if sw.IsPeerPersistent(addr) { - go sw.reconnectToPeer(addr) - } - - return err - } - - p := newPeer( - peerNodeInfo, - newPeerConn(true, sw.IsPeerPersistent(addr), c), - sw.reactorsByCh, - sw.StopPeerForError, - PeerMetrics(sw.metrics), - ) - - if err := sw.addPeer(p); err != nil { - _ = p.CloseConn() - if p.IsRunning() { - _ = p.Stop() - } - sw.conns.RemoveAddr(p.RemoteAddr()) - return err - } - - return nil -} - -func (sw *Switch) handshakePeer( - c Connection, - expectPeerID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { - // Moved from transport and hardcoded until legacy P2P stack removal. - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - peerInfo, peerKey, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: fmt.Errorf("handshake failed: %v", err), - isAuthFailure: true, - } - } - - if err = peerInfo.Validate(); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - isNodeInfoInvalid: true, - } - } - - // For outgoing conns, ensure connection key matches dialed key. - if expectPeerID != "" { - peerID := types.NodeIDFromPubKey(peerKey) - if expectPeerID != peerID { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - id: peerID, - err: fmt.Errorf( - "conn.ID (%v) dialed ID (%v) mismatch", - peerID, - expectPeerID, - ), - isAuthFailure: true, - } - } - } - - if sw.nodeInfo.ID() == peerInfo.ID() { - return peerInfo, peerKey, ErrRejected{ - addr: *types.NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()), - conn: c.(*mConnConnection).conn, - id: peerInfo.ID(), - isSelf: true, - } - } - - if err = sw.nodeInfo.CompatibleWith(peerInfo); err != nil { - return peerInfo, peerKey, ErrRejected{ - conn: c.(*mConnConnection).conn, - err: err, - id: peerInfo.ID(), - isIncompatible: true, - } - } - - return peerInfo, peerKey, nil -} - -func (sw *Switch) filterPeer(p Peer) error { - // Avoid duplicate - if sw.peers.Has(p.ID()) { - return ErrRejected{id: p.ID(), isDuplicate: true} - } - - errc := make(chan error, len(sw.peerFilters)) - - for _, f := range sw.peerFilters { - go func(f PeerFilterFunc, p Peer, errc chan<- error) { - errc <- f(sw.peers, p) - }(f, p, errc) - } - - for i := 0; i < cap(errc); i++ { - select { - case err := <-errc: - if err != nil { - return ErrRejected{id: p.ID(), err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - } - - return nil -} - -// filterConn filters a connection, rejecting it if this function errors. -// -// FIXME: This is only here for compatibility with the current Switch code. In -// the new P2P stack, peer/connection filtering should be moved into the Router -// or PeerManager and removed from here. -func (sw *Switch) filterConn(conn net.Conn) error { - if sw.conns.Has(conn) { - return ErrRejected{conn: conn, isDuplicate: true} - } - - host, _, err := net.SplitHostPort(conn.RemoteAddr().String()) - if err != nil { - return err - } - ip := net.ParseIP(host) - if ip == nil { - return fmt.Errorf("connection address has invalid IP address %q", host) - } - - // Apply filter callbacks. 
- chErr := make(chan error, len(sw.connFilters)) - for _, connFilter := range sw.connFilters { - go func(connFilter ConnFilterFunc) { - chErr <- connFilter(sw.conns, conn, []net.IP{ip}) - }(connFilter) - } - - for i := 0; i < cap(chErr); i++ { - select { - case err := <-chErr: - if err != nil { - return ErrRejected{conn: conn, err: err, isFiltered: true} - } - case <-time.After(sw.filterTimeout): - return ErrFilterTimeout{} - } - - } - - // FIXME: Doesn't really make sense to set this here, but we preserve the - // behavior from the previous P2P transport implementation. - sw.conns.Set(conn, []net.IP{ip}) - return nil -} - -// addPeer starts up the Peer and adds it to the Switch. Error is returned if -// the peer is filtered out or failed to start or can't be added. -func (sw *Switch) addPeer(p Peer) error { - if err := sw.filterPeer(p); err != nil { - return err - } - - p.SetLogger(sw.Logger.With("peer", p.SocketAddr())) - - // Handle the shut down case where the switch has stopped but we're - // concurrently trying to add a peer. - if !sw.IsRunning() { - // XXX should this return an error or just log and terminate? - sw.Logger.Error("Won't start a peer - switch is not running", "peer", p) - return nil - } - - // Add some data to the peer, which is required by reactors. - for _, reactor := range sw.reactors { - p = reactor.InitPeer(p) - } - - // Start the peer's send/recv routines. - // Must start it before adding it to the peer set - // to prevent Start and Stop from being called concurrently. - err := p.Start() - if err != nil { - // Should never happen - sw.Logger.Error("Error starting peer", "err", err, "peer", p) - return err - } - - // Add the peer to PeerSet. Do this before starting the reactors - // so that if Receive errors, we will find the peer and remove it. - // Add should not err since we already checked peers.Has(). - if err := sw.peers.Add(p); err != nil { - return err - } - sw.metrics.Peers.Add(float64(1)) - - // Start all the reactor protocols on the peer. - for _, reactor := range sw.reactors { - reactor.AddPeer(p) - } - - sw.Logger.Info("Added peer", "peer", p) - - return nil -} - -// NewNetAddressStrings returns an array of NetAddress'es build using -// the provided strings. 
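filterPeer and filterConn above share a small fan-out pattern: run every filter in its own goroutine and apply the timeout per collected result. A generic, self-contained version of the same idea (all names here are illustrative, not from the package):

    package main

    import (
        "errors"
        "fmt"
        "time"
    )

    // runFilters mirrors the filterPeer/filterConn collection loop: any filter
    // error rejects, and a slow filter trips the timeout.
    func runFilters(filters []func() error, timeout time.Duration) error {
        errc := make(chan error, len(filters))
        for _, f := range filters {
            go func(f func() error) { errc <- f() }(f)
        }
        for i := 0; i < cap(errc); i++ {
            select {
            case err := <-errc:
                if err != nil {
                    return err
                }
            case <-time.After(timeout):
                return errors.New("filter timeout")
            }
        }
        return nil
    }

    func main() {
        err := runFilters([]func() error{
            func() error { return nil },
            func() error { time.Sleep(time.Millisecond); return nil },
        }, 50*time.Millisecond)
        fmt.Println("accepted:", err == nil)
    }
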
-func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { - netAddrs := make([]*NetAddress, 0) - errs := make([]error, 0) - for _, addr := range addrs { - netAddr, err := types.NewNetAddressString(addr) - if err != nil { - errs = append(errs, err) - } else { - netAddrs = append(netAddrs, netAddr) - } - } - return netAddrs, errs -} diff --git a/internal/p2p/switch_test.go b/internal/p2p/switch_test.go deleted file mode 100644 index 8cb755c9f..000000000 --- a/internal/p2p/switch_test.go +++ /dev/null @@ -1,932 +0,0 @@ -package p2p - -import ( - "bytes" - "context" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "regexp" - "strconv" - "sync/atomic" - "testing" - "time" - - "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" -) - -var ( - cfg *config.P2PConfig - ctx = context.Background() -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -type PeerMessage struct { - PeerID types.NodeID - Bytes []byte - Counter int -} - -type TestReactor struct { - BaseReactor - - mtx tmsync.Mutex - channels []*conn.ChannelDescriptor - logMessages bool - msgsCounter int - msgsReceived map[byte][]PeerMessage -} - -func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { - tr := &TestReactor{ - channels: channels, - logMessages: logMessages, - msgsReceived: make(map[byte][]PeerMessage), - } - tr.BaseReactor = *NewBaseReactor("TestReactor", tr) - tr.SetLogger(log.TestingLogger()) - return tr -} - -func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { - return tr.channels -} - -func (tr *TestReactor) AddPeer(peer Peer) {} - -func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} - -func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { - if tr.logMessages { - tr.mtx.Lock() - defer tr.mtx.Unlock() - // fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) - tr.msgsCounter++ - } -} - -func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { - tr.mtx.Lock() - defer tr.mtx.Unlock() - return tr.msgsReceived[chID] -} - -//----------------------------------------------------------------------------- - -// convenience method for creating two switches connected to each other. -// XXX: note this uses net.Pipe and not a proper TCP conn -func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { - // Create two switches that will be interconnected. 
- switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches) - return switches[0], switches[1] -} - -func initSwitchFunc(i int, sw *Switch) *Switch { - sw.SetAddrBook(&AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{})}) - - // Make two reactors of two channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, true)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, true)) - - return sw -} - -func TestSwitches(t *testing.T) { - s1, s2 := MakeSwitchPair(t, initSwitchFunc) - t.Cleanup(func() { - if err := s1.Stop(); err != nil { - t.Error(err) - } - }) - t.Cleanup(func() { - if err := s2.Stop(); err != nil { - t.Error(err) - } - }) - - if s1.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s1, got %v", s1.Peers().Size()) - } - if s2.Peers().Size() != 1 { - t.Errorf("expected exactly 1 peer in s2, got %v", s2.Peers().Size()) - } - - // Lets send some messages - ch0Msg := []byte("channel zero") - ch1Msg := []byte("channel foo") - ch2Msg := []byte("channel bar") - - s1.Broadcast(byte(0x00), ch0Msg) - s1.Broadcast(byte(0x01), ch1Msg) - s1.Broadcast(byte(0x02), ch2Msg) - - assertMsgReceivedWithTimeout(t, - ch0Msg, - byte(0x00), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch1Msg, - byte(0x01), - s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second) - assertMsgReceivedWithTimeout(t, - ch2Msg, - byte(0x02), - s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second) -} - -func assertMsgReceivedWithTimeout( - t *testing.T, - msgBytes []byte, - channel byte, - reactor *TestReactor, - checkPeriod, - timeout time.Duration, -) { - ticker := time.NewTicker(checkPeriod) - for { - select { - case <-ticker.C: - msgs := reactor.getMsgs(channel) - if len(msgs) > 0 { - if !bytes.Equal(msgs[0].Bytes, msgBytes) { - t.Fatalf("Unexpected message bytes. 
Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes) - } - return - } - - case <-time.After(timeout): - t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel) - } - } -} - -func TestSwitchFiltersOutItself(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - - // simulate s1 having a public IP by creating a remote peer with the same ID - rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg} - rp.Start() - - // addr should be rejected in addPeer based on the same ID - err := s1.DialPeerWithAddress(rp.Addr()) - if assert.Error(t, err) { - if err, ok := err.(ErrRejected); ok { - if !err.IsSelf() { - t.Errorf("expected self to be rejected") - } - } else { - t.Errorf("expected ErrRejected") - } - } - - assert.True(t, s1.addrBook.OurAddress(rp.Addr())) - assert.False(t, s1.addrBook.HasAddress(rp.Addr())) - - rp.Stop() - - assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond) -} - -func TestSwitchDialFailsOnIncompatiblePeer(t *testing.T) { - s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - ni := s1.NodeInfo() - ni.Network = "network-a" - s1.SetNodeInfo(ni) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - err := s1.DialPeerWithAddress(rp.Addr()) - require.Error(t, err) - errRejected, ok := err.(ErrRejected) - require.True(t, ok, "expected error to be of type IsRejected") - require.True(t, errRejected.IsIncompatible(), "expected error to be IsIncompatible") - - // remote peer should not have been added to the addressbook - require.False(t, s1.addrBook.HasAddress(rp.Addr())) -} - -func TestSwitchPeerFilter(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { return nil }, - func(_ IPeerSet, _ Peer) error { return fmt.Errorf("denied") }, - func(_ IPeerSet, _ Peer) error { return nil }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - initSwitchFunc, - log.TestingLogger(), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - t.Cleanup(rp.Stop) - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if err, ok := err.(ErrRejected); ok { - if !err.IsFiltered() { - t.Errorf("expected peer to be filtered") - } - } else { - t.Errorf("expected ErrRejected") - } -} - -func TestSwitchPeerFilterTimeout(t *testing.T) { - var ( - filters = []PeerFilterFunc{ - func(_ IPeerSet, _ Peer) error { - time.Sleep(10 * time.Millisecond) - return nil - }, - } - sw = MakeSwitch( - cfg, - 1, - "testing", - "123.123.123", - initSwitchFunc, - log.TestingLogger(), - SwitchFilterTimeout(5*time.Millisecond), - SwitchPeerFilters(filters...), - ) - ) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Log(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - 
} - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - if _, ok := err.(ErrFilterTimeout); !ok { - t.Errorf("expected ErrFilterTimeout") - } -} - -func TestSwitchPeerFilterDuplicate(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err := sw.addPeer(p); err != nil { - t.Fatal(err) - } - - err = sw.addPeer(p) - if errRej, ok := err.(ErrRejected); ok { - if !errRej.IsDuplicate() { - t.Errorf("expected peer to be duplicate. got %v", errRej) - } - } else { - t.Errorf("expected ErrRejected, got %v", err) - } -} - -func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) { - time.Sleep(timeout) - if sw.Peers().Size() != 0 { - t.Fatalf("Expected %v to not connect to some peers, got %d", sw, sw.Peers().Size()) - } -} - -func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - if err != nil { - t.Error(err) - } - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // simulate remote peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - c, err := sw.transport.Dial(ctx, NewEndpoint(rp.Addr())) - if err != nil { - t.Fatal(err) - } - peerInfo, _, err := c.Handshake(ctx, sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - t.Fatal(err) - } - p := newPeer( - peerInfo, - newPeerConn(true, false, c), - sw.reactorsByCh, - sw.StopPeerForError, - ) - - err = sw.addPeer(p) - require.Nil(err) - - require.NotNil(sw.Peers().Get(rp.ID())) - - // simulate failure by closing connection - err = p.CloseConn() - require.NoError(err) - - assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond) - assert.False(p.IsRunning()) -} - -func TestSwitchStopPeerForError(t *testing.T) { - s := httptest.NewServer(promhttp.Handler()) - defer s.Close() - - scrapeMetrics := func() string { - resp, err := http.Get(s.URL) - require.NoError(t, err) - defer resp.Body.Close() - buf, _ := ioutil.ReadAll(resp.Body) - return string(buf) - } - - namespace, subsystem, name := config.TestInstrumentationConfig().Namespace, MetricsSubsystem, "peers" - re := regexp.MustCompile(namespace + `_` + subsystem + `_` + name + ` ([0-9\.]+)`) - peersMetricValue := func() float64 { - matches := re.FindStringSubmatch(scrapeMetrics()) - f, _ := strconv.ParseFloat(matches[1], 64) - return f - } - - p2pMetrics := PrometheusMetrics(namespace) - - // make two connected switches - sw1, sw2 := MakeSwitchPair(t, func(i int, sw *Switch) *Switch { - // set metrics on sw1 - if i == 0 { - opt := WithMetrics(p2pMetrics) - opt(sw) - } - return initSwitchFunc(i, sw) - }) - - assert.Equal(t, 
len(sw1.Peers().List()), 1) - assert.EqualValues(t, 1, peersMetricValue()) - - // send messages to the peer from sw1 - p := sw1.Peers().List()[0] - p.Send(0x1, []byte("here's a message to send")) - - // stop sw2. this should cause the p to fail, - // which results in calling StopPeerForError internally - t.Cleanup(func() { - if err := sw2.Stop(); err != nil { - t.Error(err) - } - }) - - // now call StopPeerForError explicitly, eg. from a reactor - sw1.StopPeerForError(p, fmt.Errorf("some err")) - - assert.Equal(t, len(sw1.Peers().List()), 0) - assert.EqualValues(t, 0, peersMetricValue()) -} - -func TestSwitchReconnectsToOutboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. simulate failure by closing connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - err = sw.DialPeerWithAddress(rp.Addr()) - require.Nil(t, err) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - p := sw.Peers().List()[0] - err = p.(*peer).CloseConn() - require.NoError(t, err) - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.False(t, p.IsRunning()) // old peer instance - assert.Equal(t, 1, sw.Peers().Size()) // new peer instance - - // 2. simulate first time dial failure - rp = &remotePeer{ - PrivKey: ed25519.GenPrivKey(), - Config: cfg, - // Use different interface to prevent duplicate IP filter, this will break - // beyond two peers. - listenAddr: "127.0.0.1:0", - } - rp.Start() - defer rp.Stop() - - conf := config.DefaultP2PConfig() - conf.TestDialFail = true // will trigger a reconnect - err = sw.addOutboundPeerWithConfig(rp.Addr(), conf) - require.NotNil(t, err) - // DialPeerWithAddres - sw.peerConfig resets the dialer - waitUntilSwitchHasAtLeastNPeers(sw, 2) - assert.Equal(t, 2, sw.Peers().Size()) -} - -func TestSwitchReconnectsToInboundPersistentPeer(t *testing.T) { - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // 1. 
simulate failure by closing the connection - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.AddPersistentPeers([]string{rp.Addr().String()}) - require.NoError(t, err) - - conn, err := rp.Dial(sw.NetAddress()) - require.NoError(t, err) - time.Sleep(50 * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) - - conn.Close() - - waitUntilSwitchHasAtLeastNPeers(sw, 1) - assert.Equal(t, 1, sw.Peers().Size()) -} - -func TestSwitchDialPeersAsync(t *testing.T) { - if testing.Short() { - return - } - - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - - err = sw.DialPeersAsync([]string{rp.Addr().String()}) - require.NoError(t, err) - time.Sleep(dialRandomizerIntervalMilliseconds * time.Millisecond) - require.NotNil(t, sw.Peers().Get(rp.ID())) -} - -func waitUntilSwitchHasAtLeastNPeers(sw *Switch, n int) { - for i := 0; i < 20; i++ { - time.Sleep(250 * time.Millisecond) - has := sw.Peers().Size() - if has >= n { - break - } - } -} - -func TestSwitchFullConnectivity(t *testing.T) { - switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches) - defer func() { - for _, sw := range switches { - sw := sw - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - } - }() - - for i, sw := range switches { - if sw.Peers().Size() != 2 { - t.Fatalf("Expected each switch to be connected to 2 other, but %d switch only connected to %d", sw.Peers().Size(), i) - } - } -} - -func TestSwitchAcceptRoutine(t *testing.T) { - cfg.MaxNumInboundPeers = 5 - - // Create some unconditional peers. - const unconditionalPeersNum = 2 - var ( - unconditionalPeers = make([]*remotePeer, unconditionalPeersNum) - unconditionalPeerIDs = make([]string, unconditionalPeersNum) - ) - for i := 0; i < unconditionalPeersNum; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - unconditionalPeers[i] = peer - unconditionalPeerIDs[i] = string(peer.ID()) - } - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc, log.TestingLogger()) - err := sw.AddUnconditionalPeerIDs(unconditionalPeerIDs) - require.NoError(t, err) - err = sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - // 0. check there are no peers - assert.Equal(t, 0, sw.Peers().Size()) - - // 1. check we connect up to MaxNumInboundPeers - peers := make([]*remotePeer, 0) - for i := 0; i < cfg.MaxNumInboundPeers; i++ { - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peers = append(peers, peer) - peer.Start() - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(100 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - - // 2. 
check we close new connections if we already have MaxNumInboundPeers peers - peer := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - peer.Start() - conn, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // check conn is closed - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - assert.Equal(t, cfg.MaxNumInboundPeers, sw.Peers().Size()) - peer.Stop() - - // 3. check we connect to unconditional peers despite the limit. - for _, peer := range unconditionalPeers { - c, err := peer.Dial(sw.NetAddress()) - require.NoError(t, err) - // spawn a reading routine to prevent connection from closing - go func(c net.Conn) { - for { - one := make([]byte, 1) - _, err := c.Read(one) - if err != nil { - return - } - } - }(c) - } - time.Sleep(10 * time.Millisecond) - assert.Equal(t, cfg.MaxNumInboundPeers+unconditionalPeersNum, sw.Peers().Size()) - - for _, peer := range peers { - peer.Stop() - } - for _, peer := range unconditionalPeers { - peer.Stop() - } -} - -func TestSwitchRejectsIncompatiblePeers(t *testing.T) { - sw := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc, log.TestingLogger()) - ni := sw.NodeInfo() - ni.Network = "network-a" - sw.SetNodeInfo(ni) - - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - err := sw.Stop() - require.NoError(t, err) - }) - - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg, Network: "network-b"} - rp.Start() - defer rp.Stop() - - assert.Equal(t, 0, sw.Peers().Size()) - - conn, err := rp.Dial(sw.NetAddress()) - assert.Nil(t, err) - - one := make([]byte, 1) - _ = conn.SetReadDeadline(time.Now().Add(10 * time.Millisecond)) - _, err = conn.Read(one) - assert.Error(t, err) - - assert.Equal(t, 0, sw.Peers().Size()) -} - -type errorTransport struct { - acceptErr error -} - -func (et errorTransport) String() string { - return "error" -} - -func (et errorTransport) Protocols() []Protocol { - return []Protocol{"error"} -} - -func (et errorTransport) Accept() (Connection, error) { - return nil, et.acceptErr -} -func (errorTransport) Dial(context.Context, Endpoint) (Connection, error) { - panic("not implemented") -} -func (errorTransport) Close() error { panic("not implemented") } -func (errorTransport) FlushClose() error { panic("not implemented") } -func (errorTransport) Endpoints() []Endpoint { panic("not implemented") } - -func TestSwitchAcceptRoutineErrorCases(t *testing.T) { - sw := NewSwitch(cfg, errorTransport{ErrFilterTimeout{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - - sw = NewSwitch(cfg, errorTransport{ErrRejected{conn: nil, err: errors.New("filtered"), isFiltered: true}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) - // TODO(melekes) check we remove our address from addrBook - - sw = NewSwitch(cfg, errorTransport{ErrTransportClosed{}}) - assert.NotPanics(t, func() { - err := sw.Start() - require.NoError(t, err) - err = sw.Stop() - require.NoError(t, err) - }) -} - -// mockReactor checks that InitPeer never called before RemovePeer. If that's -// not true, InitCalledBeforeRemoveFinished will return true. 
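TestSwitchStopPeerForError above reads the peers gauge by scraping a promhttp test server with a regexp. The same trick in self-contained form (the metric name is invented for this example):

    package main

    import (
        "fmt"
        "io"
        "net/http"
        "net/http/httptest"
        "regexp"
        "strconv"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    func main() {
        g := prometheus.NewGauge(prometheus.GaugeOpts{Name: "p2p_peers", Help: "example"})
        prometheus.MustRegister(g)
        g.Set(3)

        s := httptest.NewServer(promhttp.Handler())
        defer s.Close()

        resp, err := http.Get(s.URL)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(resp.Body)

        // Pull the sample value out of the Prometheus text format, as the test does.
        m := regexp.MustCompile(`p2p_peers ([0-9.]+)`).FindStringSubmatch(string(body))
        v, _ := strconv.ParseFloat(m[1], 64)
        fmt.Println("peers gauge:", v) // 3
    }
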
-type mockReactor struct { - *BaseReactor - - // atomic - removePeerInProgress uint32 - initCalledBeforeRemoveFinished uint32 -} - -func (r *mockReactor) GetChannels() []*ChannelDescriptor { - return []*ChannelDescriptor{{ID: testCh, Priority: 10}} -} - -func (r *mockReactor) RemovePeer(peer Peer, reason interface{}) { - atomic.StoreUint32(&r.removePeerInProgress, 1) - defer atomic.StoreUint32(&r.removePeerInProgress, 0) - time.Sleep(100 * time.Millisecond) -} - -func (r *mockReactor) InitPeer(peer Peer) Peer { - if atomic.LoadUint32(&r.removePeerInProgress) == 1 { - atomic.StoreUint32(&r.initCalledBeforeRemoveFinished, 1) - } - - return peer -} - -func (r *mockReactor) InitCalledBeforeRemoveFinished() bool { - return atomic.LoadUint32(&r.initCalledBeforeRemoveFinished) == 1 -} - -// see stopAndRemovePeer -func TestSwitchInitPeerIsNotCalledBeforeRemovePeer(t *testing.T) { - // make reactor - reactor := &mockReactor{} - reactor.BaseReactor = NewBaseReactor("mockReactor", reactor) - - // make switch - sw := MakeSwitch(cfg, 1, "testing", "123.123.123", func(i int, sw *Switch) *Switch { - sw.AddReactor("mock", reactor) - return sw - }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - // add peer - rp := &remotePeer{PrivKey: ed25519.GenPrivKey(), Config: cfg} - rp.Start() - defer rp.Stop() - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - - // wait till the switch adds rp to the peer set, then stop the peer asynchronously - for { - time.Sleep(20 * time.Millisecond) - if peer := sw.Peers().Get(rp.ID()); peer != nil { - go sw.StopPeerForError(peer, "test") - break - } - } - - // simulate peer reconnecting to us - _, err = rp.Dial(sw.NetAddress()) - require.NoError(t, err) - // wait till the switch adds rp to the peer set - time.Sleep(50 * time.Millisecond) - - // make sure reactor.RemovePeer is finished before InitPeer is called - assert.False(t, reactor.InitCalledBeforeRemoveFinished()) -} - -func BenchmarkSwitchBroadcast(b *testing.B) { - s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch { - // Make bar reactors of bar channels each - sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x00), Priority: 10}, - {ID: byte(0x01), Priority: 10}, - }, false)) - sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{ - {ID: byte(0x02), Priority: 10}, - {ID: byte(0x03), Priority: 10}, - }, false)) - return sw - }) - - b.Cleanup(func() { - if err := s1.Stop(); err != nil { - b.Error(err) - } - }) - - b.Cleanup(func() { - if err := s2.Stop(); err != nil { - b.Error(err) - } - }) - - // Allow time for goroutines to boot up - time.Sleep(1 * time.Second) - - b.ResetTimer() - - numSuccess, numFailure := 0, 0 - - // Send random message from foo channel to another - for i := 0; i < b.N; i++ { - chID := byte(i % 4) - successChan := s1.Broadcast(chID, []byte("test data")) - for s := range successChan { - if s { - numSuccess++ - } else { - numFailure++ - } - } - } - - b.Logf("success: %v, failure: %v", numSuccess, numFailure) -} - -func TestNewNetAddressStrings(t *testing.T) { - addrs, errs := NewNetAddressStrings([]string{ - "127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"}) - assert.Len(t, errs, 1) - assert.Equal(t, 2, len(addrs)) -} diff --git a/internal/p2p/test_util.go b/internal/p2p/test_util.go deleted file mode 100644 index b2851646d..000000000 --- 
a/internal/p2p/test_util.go +++ /dev/null @@ -1,288 +0,0 @@ -package p2p - -import ( - "context" - "fmt" - mrand "math/rand" - "net" - - "github.com/tendermint/tendermint/libs/log" - tmnet "github.com/tendermint/tendermint/libs/net" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p/conn" -) - -const testCh = 0x01 - -//------------------------------------------------ - -func AddPeerToSwitchPeerSet(sw *Switch, peer Peer) { - sw.peers.Add(peer) //nolint:errcheck // ignore error -} - -func CreateRandomPeer(outbound bool) Peer { - addr, netAddr := CreateRoutableAddr() - p := &peer{ - peerConn: peerConn{outbound: outbound}, - nodeInfo: types.NodeInfo{ - NodeID: netAddr.ID, - ListenAddr: netAddr.DialString(), - }, - metrics: NopMetrics(), - } - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p -} - -// nolint:gosec // G404: Use of weak random number generator -func CreateRoutableAddr() (addr string, netAddr *NetAddress) { - for { - var err error - addr = fmt.Sprintf("%X@%v.%v.%v.%v:26656", - tmrand.Bytes(20), - mrand.Int()%256, - mrand.Int()%256, - mrand.Int()%256, - mrand.Int()%256) - netAddr, err = types.NewNetAddressString(addr) - if err != nil { - panic(err) - } - if netAddr.Routable() { - break - } - } - return -} - -//------------------------------------------------------------------ -// Connects switches via arbitrary net.Conn. Used for testing. - -const TestHost = "localhost" - -// MakeConnectedSwitches returns n switches, connected according to the connect func. -// If connect==Connect2Switches, the switches will be fully connected. -// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). -// NOTE: panics if any switch fails to start. -func MakeConnectedSwitches(cfg *config.P2PConfig, - n int, - initSwitch func(int, *Switch) *Switch, - connect func([]*Switch, int, int), -) []*Switch { - switches := make([]*Switch, n) - for i := 0; i < n; i++ { - switches[i] = MakeSwitch(cfg, i, TestHost, "123.123.123", initSwitch, log.TestingLogger()) - } - - if err := StartSwitches(switches); err != nil { - panic(err) - } - - for i := 0; i < n; i++ { - for j := i + 1; j < n; j++ { - connect(switches, i, j) - } - } - - return switches -} - -// Connect2Switches will connect switches i and j via net.Pipe(). -// Blocks until a connection is established. -// NOTE: caller ensures i and j are within bounds. 
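As the comment above notes, these test helpers connect switches over net.Pipe rather than a real TCP connection. A tiny standalone demonstration of that primitive:

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        // net.Pipe returns two ends of a synchronous, in-memory connection;
        // Connect2Switches hands one end to each switch.
        c1, c2 := net.Pipe()
        go func() {
            c1.Write([]byte("ping")) // blocks until the other end reads
            c1.Close()
        }()
        buf := make([]byte, 4)
        n, _ := c2.Read(buf)
        fmt.Println(string(buf[:n])) // ping
    }
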
-func Connect2Switches(switches []*Switch, i, j int) { - switchI := switches[i] - switchJ := switches[j] - - c1, c2 := conn.NetPipe() - - doneCh := make(chan struct{}) - go func() { - err := switchI.addPeerWithConnection(c1) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - go func() { - err := switchJ.addPeerWithConnection(c2) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - <-doneCh - <-doneCh -} - -func (sw *Switch) addPeerWithConnection(conn net.Conn) error { - pc, err := testInboundPeerConn(sw.transport.(*MConnTransport), conn) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - peerNodeInfo, _, err := pc.conn.Handshake(context.Background(), sw.nodeInfo, sw.nodeKey.PrivKey) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - - p := newPeer( - peerNodeInfo, - pc, - sw.reactorsByCh, - sw.StopPeerForError, - ) - - if err = sw.addPeer(p); err != nil { - pc.CloseConn() - return err - } - - return nil -} - -// StartSwitches calls sw.Start() for each given switch. -// It returns the first encountered error. -func StartSwitches(switches []*Switch) error { - for _, s := range switches { - err := s.Start() // start switch and reactors - if err != nil { - return err - } - } - return nil -} - -func MakeSwitch( - cfg *config.P2PConfig, - i int, - network, version string, - initSwitch func(int, *Switch) *Switch, - logger log.Logger, - opts ...SwitchOption, -) *Switch { - - nodeKey := types.GenNodeKey() - nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i)) - addr, err := types.NewNetAddressString( - nodeKey.ID.AddressString(nodeInfo.ListenAddr), - ) - if err != nil { - panic(err) - } - - swLogger := logger.With("switch", i) - t := NewMConnTransport(swLogger, MConnConfig(cfg), - []*ChannelDescriptor{}, MConnTransportOptions{}) - - // TODO: let the config be passed in? - sw := initSwitch(i, NewSwitch(cfg, t, opts...)) - sw.SetLogger(swLogger) - sw.SetNodeKey(nodeKey) - - if err := t.Listen(NewEndpoint(addr)); err != nil { - panic(err) - } - - ni := nodeInfo - ni.Channels = []byte{} - for ch := range sw.reactorsByCh { - ni.Channels = append(ni.Channels, ch) - } - nodeInfo = ni - - // TODO: We need to setup reactors ahead of time so the NodeInfo is properly - // populated and we don't have to do those awkward overrides and setters. 
- sw.SetNodeInfo(nodeInfo) - - return sw -} - -func testInboundPeerConn( - transport *MConnTransport, - conn net.Conn, -) (peerConn, error) { - return testPeerConn(transport, conn, false, false) -} - -func testPeerConn( - transport *MConnTransport, - rawConn net.Conn, - outbound, persistent bool, -) (pc peerConn, err error) { - - conn := newMConnConnection(transport.logger, rawConn, transport.mConnConfig, transport.channelDescs) - - return newPeerConn(outbound, persistent, conn), nil -} - -//---------------------------------------------------------------- -// rand node info - -func testNodeInfo(id types.NodeID, name string) types.NodeInfo { - return testNodeInfoWithNetwork(id, name, "testing") -} - -func testNodeInfoWithNetwork(id types.NodeID, name, network string) types.NodeInfo { - return types.NodeInfo{ - ProtocolVersion: defaultProtocolVersion, - NodeID: id, - ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - Network: network, - Version: "1.2.3-rc0-deadbeef", - Channels: []byte{testCh}, - Moniker: name, - Other: types.NodeInfoOther{ - TxIndex: "on", - RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), - }, - } -} - -func getFreePort() int { - port, err := tmnet.GetFreePort() - if err != nil { - panic(err) - } - return port -} - -type AddrBookMock struct { - Addrs map[string]struct{} - OurAddrs map[string]struct{} - PrivateAddrs map[string]struct{} -} - -var _ AddrBook = (*AddrBookMock)(nil) - -func (book *AddrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error { - book.Addrs[addr.String()] = struct{}{} - return nil -} -func (book *AddrBookMock) AddOurAddress(addr *NetAddress) { book.OurAddrs[addr.String()] = struct{}{} } -func (book *AddrBookMock) OurAddress(addr *NetAddress) bool { - _, ok := book.OurAddrs[addr.String()] - return ok -} -func (book *AddrBookMock) MarkGood(types.NodeID) {} -func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { - _, ok := book.Addrs[addr.String()] - return ok -} -func (book *AddrBookMock) RemoveAddress(addr *NetAddress) { - delete(book.Addrs, addr.String()) -} -func (book *AddrBookMock) Save() {} -func (book *AddrBookMock) AddPrivateIDs(addrs []string) { - for _, addr := range addrs { - book.PrivateAddrs[addr] = struct{}{} - } -} diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index a3245dfc8..e78906362 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -7,9 +7,7 @@ import ( "net" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" ) //go:generate ../../scripts/mockery_generate.sh Transport|Connection @@ -20,14 +18,6 @@ const ( defaultProtocol Protocol = MConnProtocol ) -// defaultProtocolVersion populates the Block and P2P versions using -// the global values, but not the App. -var defaultProtocolVersion = types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, -} - // Protocol identifies a transport protocol. type Protocol string @@ -54,6 +44,10 @@ type Transport interface { // Close stops accepting new connections, but does not close active connections. Close() error + // AddChannelDescriptors is only part of this interface + // temporarily + AddChannelDescriptors([]*ChannelDescriptor) + // Stringer is used to display the transport, e.g. in logs. 
// // Without this, the logger may use reflection to access and display @@ -91,19 +85,7 @@ type Connection interface { ReceiveMessage() (ChannelID, []byte, error) // SendMessage sends a message on the connection. Returns io.EOF if closed. - // - // FIXME: For compatibility with the legacy P2P stack, it returns an - // additional boolean false if the message timed out waiting to be accepted - // into the send buffer. This should be removed. - SendMessage(ChannelID, []byte) (bool, error) - - // TrySendMessage is a non-blocking version of SendMessage that returns - // immediately if the message buffer is full. It returns true if the message - // was accepted. - // - // FIXME: This method is here for backwards-compatibility with the legacy - // P2P stack and should be removed. - TrySendMessage(ChannelID, []byte) (bool, error) + SendMessage(ChannelID, []byte) error // LocalEndpoint returns the local endpoint for the connection. LocalEndpoint() Endpoint @@ -114,18 +96,6 @@ type Connection interface { // Close closes the connection. Close() error - // FlushClose flushes all pending sends and then closes the connection. - // - // FIXME: This only exists for backwards-compatibility with the current - // MConnection implementation. There should really be a separate Flush() - // method, but there is no easy way to synchronously flush pending data with - // the current MConnection code. - FlushClose() error - - // Status returns the current connection status. - // FIXME: Only here for compatibility with the current Peer code. - Status() conn.ConnectionStatus - // Stringer is used to display the connection, e.g. in logs. // // Without this, the logger may use reflection to access and display @@ -156,12 +126,17 @@ type Endpoint struct { } // NewEndpoint constructs an Endpoint from a types.NetAddress structure. -func NewEndpoint(na *types.NetAddress) Endpoint { +func NewEndpoint(addr string) (Endpoint, error) { + ip, port, err := types.ParseAddressString(addr) + if err != nil { + return Endpoint{}, err + } + return Endpoint{ Protocol: MConnProtocol, - IP: na.IP, - Port: na.Port, - } + IP: ip, + Port: port, + }, nil } // NodeAddress converts the endpoint into a NodeAddress for the given node ID. diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index eca261476..3e0281c39 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -348,9 +348,9 @@ func (c *mConnConnection) handshake( } // onReceive is a callback for MConnection received messages. -func (c *mConnConnection) onReceive(chID byte, payload []byte) { +func (c *mConnConnection) onReceive(chID ChannelID, payload []byte) { select { - case c.receiveCh <- mConnMessage{channelID: ChannelID(chID), payload: payload}: + case c.receiveCh <- mConnMessage{channelID: chID, payload: payload}: case <-c.closeCh: } } @@ -377,32 +377,21 @@ func (c *mConnConnection) String() string { } // SendMessage implements Connection. 
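The Connection change above folds the old (bool, error) result into a single error, so a timed-out send now surfaces as an error like any other failure. A caller-side sketch under the new signature (the helper name is invented; errors and io are the standard-library packages):

    // send is a minimal sketch assuming a p2p.Connection c from this package.
    func send(c Connection, chID ChannelID, msg []byte) error {
        err := c.SendMessage(chID, msg)
        if errors.Is(err, io.EOF) {
            // Connection closed; callers typically drop the peer here.
        }
        return err // may also be the new "sending message timed out" error
    }
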
-func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) (bool, error) { +func (c *mConnConnection) SendMessage(chID ChannelID, msg []byte) error { if chID > math.MaxUint8 { - return false, fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) + return fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) } select { case err := <-c.errorCh: - return false, err + return err case <-c.closeCh: - return false, io.EOF + return io.EOF default: - return c.mconn.Send(byte(chID), msg), nil - } -} + if ok := c.mconn.Send(chID, msg); !ok { + return errors.New("sending message timed out") + } -// TrySendMessage implements Connection. -func (c *mConnConnection) TrySendMessage(chID ChannelID, msg []byte) (bool, error) { - if chID > math.MaxUint8 { - return false, fmt.Errorf("MConnection only supports 1-byte channel IDs (got %v)", chID) - } - select { - case err := <-c.errorCh: - return false, err - case <-c.closeCh: - return false, io.EOF - default: - return c.mconn.TrySend(byte(chID), msg), nil + return nil } } @@ -442,14 +431,6 @@ func (c *mConnConnection) RemoteEndpoint() Endpoint { return endpoint } -// Status implements Connection. -func (c *mConnConnection) Status() conn.ConnectionStatus { - if c.mconn == nil { - return conn.ConnectionStatus{} - } - return c.mconn.Status() -} - // Close implements Connection. func (c *mConnConnection) Close() error { var err error @@ -463,17 +444,3 @@ func (c *mConnConnection) Close() error { }) return err } - -// FlushClose implements Connection. -func (c *mConnConnection) FlushClose() error { - var err error - c.closeOnce.Do(func() { - if c.mconn != nil && c.mconn.IsRunning() { - c.mconn.FlushStop() - } else { - err = c.conn.Close() - } - close(c.closeCh) - }) - return err -} diff --git a/internal/p2p/transport_mconn_test.go b/internal/p2p/transport_mconn_test.go index 06cd93c0a..d33438109 100644 --- a/internal/p2p/transport_mconn_test.go +++ b/internal/p2p/transport_mconn_test.go @@ -21,7 +21,7 @@ func init() { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{}, ) err := transport.Listen(p2p.Endpoint{ @@ -43,7 +43,7 @@ func TestMConnTransport_AcceptBeforeListen(t *testing.T) { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{ MaxAcceptedConnections: 2, }, @@ -61,7 +61,7 @@ func TestMConnTransport_AcceptMaxAcceptedConnections(t *testing.T) { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{ MaxAcceptedConnections: 2, }, @@ -148,7 +148,7 @@ func TestMConnTransport_Listen(t *testing.T) { transport := p2p.NewMConnTransport( log.TestingLogger(), conn.DefaultMConnConfig(), - []*p2p.ChannelDescriptor{{ID: byte(chID), Priority: 1}}, + []*p2p.ChannelDescriptor{{ID: chID, Priority: 1}}, p2p.MConnTransportOptions{}, ) @@ -195,7 +195,6 @@ func TestMConnTransport_Listen(t *testing.T) { _ = conn.Close() <-dialedChan - time.Sleep(time.Minute) // closing the connection should not error require.NoError(t, peerConn.Close()) diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 
09a387254..b4161ecd6 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -10,7 +10,6 @@ import ( "github.com/tendermint/tendermint/crypto" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -118,6 +117,8 @@ func (t *MemoryTransport) String() string { return string(MemoryProtocol) } +func (t *MemoryTransport) AddChannelDescriptors([]*ChannelDescriptor) {} + // Protocols implements Transport. func (t *MemoryTransport) Protocols() []Protocol { return []Protocol{MemoryProtocol} @@ -262,11 +263,6 @@ func (c *MemoryConnection) RemoteEndpoint() Endpoint { } } -// Status implements Connection. -func (c *MemoryConnection) Status() conn.ConnectionStatus { - return conn.ConnectionStatus{} -} - // Handshake implements Connection. func (c *MemoryConnection) Handshake( ctx context.Context, @@ -316,42 +312,21 @@ func (c *MemoryConnection) ReceiveMessage() (ChannelID, []byte, error) { } // SendMessage implements Connection. -func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) (bool, error) { +func (c *MemoryConnection) SendMessage(chID ChannelID, msg []byte) error { // Check close first, since channels are buffered. Otherwise, below select // may non-deterministically return non-error even when closed. select { case <-c.closer.Done(): - return false, io.EOF + return io.EOF default: } select { case c.sendCh <- memoryMessage{channelID: chID, message: msg}: c.logger.Debug("sent message", "chID", chID, "msg", msg) - return true, nil + return nil case <-c.closer.Done(): - return false, io.EOF - } -} - -// TrySendMessage implements Connection. -func (c *MemoryConnection) TrySendMessage(chID ChannelID, msg []byte) (bool, error) { - // Check close first, since channels are buffered. Otherwise, below select - // may non-deterministically return non-error even when closed. - select { - case <-c.closer.Done(): - return false, io.EOF - default: - } - - select { - case c.sendCh <- memoryMessage{channelID: chID, message: msg}: - c.logger.Debug("sent message", "chID", chID, "msg", msg) - return true, nil - case <-c.closer.Done(): - return false, io.EOF - default: - return false, nil + return io.EOF } } @@ -366,8 +341,3 @@ func (c *MemoryConnection) Close() error { } return nil } - -// FlushClose implements Connection. -func (c *MemoryConnection) FlushClose() error { - return c.Close() -} diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index 1b8ab77f5..cdfb57c70 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -315,22 +315,16 @@ func TestConnection_FlushClose(t *testing.T) { b := makeTransport(t) ab, _ := dialAcceptHandshake(t, a, b) - // FIXME: FlushClose should be removed (and replaced by separate Flush - // and Close calls if necessary). We can't reliably test it, so we just - // make sure it closes both ends and that it's idempotent. - err := ab.FlushClose() + err := ab.Close() require.NoError(t, err) _, _, err = ab.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ab.SendMessage(chID, []byte("closed")) + err = ab.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) - - err = ab.FlushClose() - require.NoError(t, err) }) } @@ -355,9 +349,8 @@ func TestConnection_SendReceive(t *testing.T) { ab, ba := dialAcceptHandshake(t, a, b) // Can send and receive a to b. 
- ok, err := ab.SendMessage(chID, []byte("foo")) + err := ab.SendMessage(chID, []byte("foo")) require.NoError(t, err) - require.True(t, ok) ch, msg, err := ba.ReceiveMessage() require.NoError(t, err) @@ -365,30 +358,20 @@ func TestConnection_SendReceive(t *testing.T) { require.Equal(t, chID, ch) // Can send and receive b to a. - _, err = ba.SendMessage(chID, []byte("bar")) + err = ba.SendMessage(chID, []byte("bar")) require.NoError(t, err) _, msg, err = ab.ReceiveMessage() require.NoError(t, err) require.Equal(t, []byte("bar"), msg) - // TrySendMessage also works. - ok, err = ba.TrySendMessage(chID, []byte("try")) - require.NoError(t, err) - require.True(t, ok) - - ch, msg, err = ab.ReceiveMessage() - require.NoError(t, err) - require.Equal(t, []byte("try"), msg) - require.Equal(t, chID, ch) - // Connections should still be active after closing the transports. err = a.Close() require.NoError(t, err) err = b.Close() require.NoError(t, err) - _, err = ab.SendMessage(chID, []byte("still here")) + err = ab.SendMessage(chID, []byte("still here")) require.NoError(t, err) ch, msg, err = ba.ReceiveMessage() require.NoError(t, err) @@ -403,34 +386,18 @@ func TestConnection_SendReceive(t *testing.T) { _, _, err = ab.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ab.TrySendMessage(chID, []byte("closed try")) - require.Error(t, err) - require.Equal(t, io.EOF, err) - _, err = ab.SendMessage(chID, []byte("closed")) + + err = ab.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) _, _, err = ba.ReceiveMessage() require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ba.TrySendMessage(chID, []byte("closed try")) + + err = ba.SendMessage(chID, []byte("closed")) require.Error(t, err) require.Equal(t, io.EOF, err) - _, err = ba.SendMessage(chID, []byte("closed")) - require.Error(t, err) - require.Equal(t, io.EOF, err) - }) -} - -func TestConnection_Status(t *testing.T) { - withTransports(t, func(t *testing.T, makeTransport transportFactory) { - a := makeTransport(t) - b := makeTransport(t) - ab, _ := dialAcceptHandshake(t, a, b) - - // FIXME: This isn't implemented in all transports, so for now we just - // check that it doesn't panic, which isn't really much of a test. - ab.Status() }) } diff --git a/internal/p2p/types.go b/internal/p2p/types.go index 403f43528..bee99a4fe 100644 --- a/internal/p2p/types.go +++ b/internal/p2p/types.go @@ -5,4 +5,4 @@ import ( ) type ChannelDescriptor = conn.ChannelDescriptor -type ConnectionStatus = conn.ConnectionStatus +type ChannelID = conn.ChannelID diff --git a/internal/p2p/wdrr_queue.go b/internal/p2p/wdrr_queue.go deleted file mode 100644 index b99403be2..000000000 --- a/internal/p2p/wdrr_queue.go +++ /dev/null @@ -1,287 +0,0 @@ -package p2p - -import ( - "fmt" - "sort" - "strconv" - - "github.com/gogo/protobuf/proto" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" -) - -// wrappedEnvelope wraps a p2p Envelope with its precomputed size. -type wrappedEnvelope struct { - envelope Envelope - size uint -} - -// assert the WDDR scheduler implements the queue interface at compile-time -var _ queue = (*wdrrScheduler)(nil) - -// wdrrQueue implements a Weighted Deficit Round Robin (WDRR) scheduling -// algorithm via the queue interface. A WDRR queue is created per peer, where -// the queue will have N number of flows. Each flow corresponds to a p2p Channel, -// so there are n input flows and a single output source, the peer's connection. 
-// -// The WDRR scheduler contains a shared buffer with a fixed capacity. -// -// Each flow has the following: -// - quantum: The number of bytes that is added to the deficit counter of the -// flow in each round. The flow can send at most quantum bytes at a time. Each -// flow has its own unique quantum, which gives the queue its weighted nature. -// A higher quantum corresponds to a higher weight/priority. The quantum is -// computed as MaxSendBytes * Priority. -// - deficit counter: The number of bytes that the flow is allowed to transmit -// when it is its turn. -// -// See: https://en.wikipedia.org/wiki/Deficit_round_robin -type wdrrScheduler struct { - logger log.Logger - metrics *Metrics - chDescs []ChannelDescriptor - capacity uint - size uint - chPriorities map[ChannelID]uint - buffer map[ChannelID][]wrappedEnvelope - quanta map[ChannelID]uint - deficits map[ChannelID]uint - - closer *tmsync.Closer - doneCh *tmsync.Closer - - enqueueCh chan Envelope - dequeueCh chan Envelope -} - -func newWDRRScheduler( - logger log.Logger, - m *Metrics, - chDescs []ChannelDescriptor, - enqueueBuf, dequeueBuf, capacity uint, -) *wdrrScheduler { - - // copy each ChannelDescriptor and sort them by channel priority - chDescsCopy := make([]ChannelDescriptor, len(chDescs)) - copy(chDescsCopy, chDescs) - sort.Slice(chDescsCopy, func(i, j int) bool { return chDescsCopy[i].Priority > chDescsCopy[j].Priority }) - - var ( - buffer = make(map[ChannelID][]wrappedEnvelope) - chPriorities = make(map[ChannelID]uint) - quanta = make(map[ChannelID]uint) - deficits = make(map[ChannelID]uint) - ) - - for _, chDesc := range chDescsCopy { - chID := ChannelID(chDesc.ID) - chPriorities[chID] = uint(chDesc.Priority) - buffer[chID] = make([]wrappedEnvelope, 0) - quanta[chID] = chDesc.MaxSendBytes * uint(chDesc.Priority) - } - - return &wdrrScheduler{ - logger: logger.With("queue", "wdrr"), - metrics: m, - capacity: capacity, - chPriorities: chPriorities, - chDescs: chDescsCopy, - buffer: buffer, - quanta: quanta, - deficits: deficits, - closer: tmsync.NewCloser(), - doneCh: tmsync.NewCloser(), - enqueueCh: make(chan Envelope, enqueueBuf), - dequeueCh: make(chan Envelope, dequeueBuf), - } -} - -// enqueue returns an unbuffered write-only channel which a producer can send on. -func (s *wdrrScheduler) enqueue() chan<- Envelope { - return s.enqueueCh -} - -// dequeue returns an unbuffered read-only channel which a consumer can read from. -func (s *wdrrScheduler) dequeue() <-chan Envelope { - return s.dequeueCh -} - -func (s *wdrrScheduler) closed() <-chan struct{} { - return s.closer.Done() -} - -// close closes the WDRR queue. After this call enqueue() will block, so the -// caller must select on closed() as well to avoid blocking forever. The -// enqueue() and dequeue() along with the internal channels will NOT be closed. -// Note, close() will block until all externally spawned goroutines have exited. -func (s *wdrrScheduler) close() { - s.closer.Close() - <-s.doneCh.Done() -} - -// start starts the WDRR queue process in a blocking goroutine. This must be -// called before the queue can start to process and accept Envelopes. -func (s *wdrrScheduler) start() { - go s.process() -} - -// process starts a blocking WDRR scheduler process, where we continuously -// evaluate if we need to attempt to enqueue an Envelope or schedule Envelopes -// to be dequeued and subsequently read and sent on the source connection. -// Internally, each p2p Channel maps to a flow, where each flow has a deficit -// and a quantum. 
-// -// For each Envelope requested to be enqueued, we evaluate if there is sufficient -// capacity in the shared buffer to add the Envelope. If so, it is added. -// Otherwise, we evaluate all flows of lower priority where we attempt find an -// existing Envelope in the shared buffer of sufficient size that can be dropped -// in place of the incoming Envelope. If there is no such Envelope that can be -// dropped, then the incoming Envelope is dropped. -// -// When there is nothing to be enqueued, we perform the WDRR algorithm and -// determine which Envelopes can be dequeued. For each Envelope that can be -// dequeued, it is sent on the dequeueCh. Specifically, for each flow, if it is -// non-empty, its deficit counter is incremented by its quantum value. Then, the -// value of the deficit counter is a maximal amount of bytes that can be sent at -// this round. If the deficit counter is greater than the Envelopes's message -// size at the head of the queue (HoQ), this envelope can be sent and the value -// of the counter is decremented by the message's size. Then, the size of the -// next Envelopes's message is compared to the counter value, etc. Once the flow -// is empty or the value of the counter is insufficient, the scheduler will skip -// to the next flow. If the flow is empty, the value of the deficit counter is -// reset to 0. -// -// XXX/TODO: Evaluate the single goroutine scheduler mechanism. In other words, -// evaluate the effectiveness and performance of having a single goroutine -// perform handling both enqueueing and dequeueing logic. Specifically, there -// is potentially contention between reading off of enqueueCh and trying to -// enqueue while also attempting to perform the WDRR algorithm and find the next -// set of Envelope(s) to send on the dequeueCh. Alternatively, we could consider -// separate scheduling goroutines, but then that requires the use of mutexes and -// possibly a degrading performance. -func (s *wdrrScheduler) process() { - defer s.doneCh.Close() - - for { - select { - case <-s.closer.Done(): - return - - case e := <-s.enqueueCh: - // attempt to enqueue the incoming Envelope - chIDStr := strconv.Itoa(int(e.channelID)) - wEnv := wrappedEnvelope{envelope: e, size: uint(proto.Size(e.Message))} - msgSize := wEnv.size - - s.metrics.PeerPendingSendBytes.With("peer_id", string(e.To)).Add(float64(msgSize)) - - // If we're at capacity, we need to either drop the incoming Envelope or - // an Envelope from a lower priority flow. Otherwise, we add the (wrapped) - // envelope to the flow's queue. - if s.size+wEnv.size > s.capacity { - chPriority := s.chPriorities[e.channelID] - - var ( - canDrop bool - dropIdx int - dropChID ChannelID - ) - - // Evaluate all lower priority flows and determine if there exists an - // Envelope that is of equal or greater size that we can drop in favor - // of the incoming Envelope. - for i := len(s.chDescs) - 1; i >= 0 && uint(s.chDescs[i].Priority) < chPriority && !canDrop; i-- { - currChID := ChannelID(s.chDescs[i].ID) - flow := s.buffer[currChID] - - for j := 0; j < len(flow) && !canDrop; j++ { - if flow[j].size >= wEnv.size { - canDrop = true - dropIdx = j - dropChID = currChID - break - } - } - } - - // If we can drop an existing Envelope, drop it and enqueue the incoming - // Envelope. 
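// --- Sketch (illustration only) of the deficit round-robin pass described
// in the comments above, reduced to plain ints: flows maps a channel ID to
// the byte sizes of its queued messages, and order stands in for chDescs
// sorted by priority. quanta and deficits mirror the scheduler's maps.
package drrsketch

func drrPass(order []int, flows map[int][]int, quanta, deficits map[int]int) []int {
	var sent []int
	for _, chID := range order {
		if len(flows[chID]) > 0 {
			deficits[chID] += quanta[chID] // bump the flow's deficit by its quantum
			// Dequeue head-of-queue messages while the deficit covers them.
			for len(flows[chID]) > 0 && deficits[chID] >= flows[chID][0] {
				size := flows[chID][0]
				deficits[chID] -= size
				flows[chID] = flows[chID][1:]
				sent = append(sent, size)
			}
		}
		if len(flows[chID]) == 0 {
			deficits[chID] = 0 // reset, so idle channels do not accrue credit
		}
	}
	return sent
}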
- if canDrop { - chIDStr = strconv.Itoa(int(dropChID)) - chPriority = s.chPriorities[dropChID] - msgSize = s.buffer[dropChID][dropIdx].size - - // Drop Envelope for the lower priority flow and update the queue's - // buffer size - s.size -= msgSize - s.buffer[dropChID] = append(s.buffer[dropChID][:dropIdx], s.buffer[dropChID][dropIdx+1:]...) - - // add the incoming Envelope and update queue's buffer size - s.size += wEnv.size - s.buffer[e.channelID] = append(s.buffer[e.channelID], wEnv) - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Set(float64(wEnv.size)) - } - - // We either dropped the incoming Enevelope or one from an existing - // lower priority flow. - s.metrics.PeerQueueDroppedMsgs.With("ch_id", chIDStr).Add(1) - s.logger.Debug( - "dropped envelope", - "ch_id", chIDStr, - "priority", chPriority, - "capacity", s.capacity, - "msg_size", msgSize, - ) - } else { - // we have sufficient capacity to enqueue the incoming Envelope - s.metrics.PeerQueueMsgSize.With("ch_id", chIDStr).Set(float64(wEnv.size)) - s.buffer[e.channelID] = append(s.buffer[e.channelID], wEnv) - s.size += wEnv.size - } - - default: - // perform the WDRR algorithm - for _, chDesc := range s.chDescs { - chID := ChannelID(chDesc.ID) - - // only consider non-empty flows - if len(s.buffer[chID]) > 0 { - // bump flow's quantum - s.deficits[chID] += s.quanta[chID] - - // grab the flow's current deficit counter and HoQ (wrapped) Envelope - d := s.deficits[chID] - we := s.buffer[chID][0] - - // While the flow is non-empty and we can send the current Envelope - // on the dequeueCh: - // - // 1. send the Envelope - // 2. update the scheduler's shared buffer's size - // 3. update the flow's deficit - // 4. remove from the flow's queue - // 5. grab the next HoQ Envelope and flow's deficit - for len(s.buffer[chID]) > 0 && d >= we.size { - s.metrics.PeerSendBytesTotal.With( - "chID", fmt.Sprint(chID), - "peer_id", string(we.envelope.To)).Add(float64(we.size)) - s.dequeueCh <- we.envelope - s.size -= we.size - s.deficits[chID] -= we.size - s.buffer[chID] = s.buffer[chID][1:] - - if len(s.buffer[chID]) > 0 { - d = s.deficits[chID] - we = s.buffer[chID][0] - } - } - } - - // reset the flow's deficit to zero if it is empty - if len(s.buffer[chID]) == 0 { - s.deficits[chID] = 0 - } - } - } - } -} diff --git a/internal/p2p/wdrr_queue_test.go b/internal/p2p/wdrr_queue_test.go deleted file mode 100644 index d49c77e76..000000000 --- a/internal/p2p/wdrr_queue_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package p2p - -import ( - "math" - "math/rand" - "testing" - "time" - - gogotypes "github.com/gogo/protobuf/types" - "github.com/stretchr/testify/require" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/log" -) - -type testMessage = gogotypes.StringValue - -func TestWDRRQueue_EqualWeights(t *testing.T) { - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 1, MaxSendBytes: 4}, - {ID: 0x02, Priority: 1, MaxSendBytes: 4}, - {ID: 0x03, Priority: 1, MaxSendBytes: 4}, - {ID: 0x04, Priority: 1, MaxSendBytes: 4}, - {ID: 0x05, Priority: 1, MaxSendBytes: 4}, - {ID: 0x06, Priority: 1, MaxSendBytes: 4}, - } - - peerQueue := newWDRRScheduler(log.NewNopLogger(), NopMetrics(), chDescs, 1000, 1000, 120) - peerQueue.start() - - totalMsgs := make(map[ChannelID]int) - deliveredMsgs := make(map[ChannelID]int) - successRates := make(map[ChannelID]float64) - - closer := tmsync.NewCloser() - - go func() { - timout := 10 * time.Second - ticker := time.NewTicker(timout) - defer ticker.Stop() - - for { - 
select { - case e := <-peerQueue.dequeue(): - deliveredMsgs[e.channelID]++ - ticker.Reset(timout) - - case <-ticker.C: - closer.Close() - } - } - }() - - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - maxMsgs := 5000 - minMsgs := 1000 - - for _, chDesc := range chDescs { - total := rng.Intn(maxMsgs-minMsgs) + minMsgs // total = rand[minMsgs, maxMsgs) - totalMsgs[ChannelID(chDesc.ID)] = total - - go func(cID ChannelID, n int) { - for i := 0; i < n; i++ { - peerQueue.enqueue() <- Envelope{ - channelID: cID, - Message: &testMessage{Value: "foo"}, // 5 bytes - } - } - }(ChannelID(chDesc.ID), total) - } - - // wait for dequeueing to complete - <-closer.Done() - - // close queue and wait for cleanup - peerQueue.close() - <-peerQueue.closed() - - var ( - sum float64 - stdDev float64 - ) - - for _, chDesc := range peerQueue.chDescs { - chID := ChannelID(chDesc.ID) - require.Zero(t, peerQueue.deficits[chID], "expected flow deficit to be zero") - require.Len(t, peerQueue.buffer[chID], 0, "expected flow queue to be empty") - - total := totalMsgs[chID] - delivered := deliveredMsgs[chID] - successRate := float64(delivered) / float64(total) - - sum += successRate - successRates[chID] = successRate - - // require some messages dropped - require.Less(t, delivered, total, "expected some messages to be dropped") - require.Less(t, successRate, 1.0, "expected a success rate below 100%") - } - - require.Zero(t, peerQueue.size, "expected scheduler size to be zero") - - numFlows := float64(len(peerQueue.buffer)) - mean := sum / numFlows - - for _, successRate := range successRates { - stdDev += math.Pow(successRate-mean, 2) - } - - stdDev = math.Sqrt(stdDev / numFlows) - require.Less(t, stdDev, 0.02, "expected success rate standard deviation to be less than 2%") -} - -func TestWDRRQueue_DecreasingWeights(t *testing.T) { - chDescs := []ChannelDescriptor{ - {ID: 0x01, Priority: 18, MaxSendBytes: 4}, - {ID: 0x02, Priority: 10, MaxSendBytes: 4}, - {ID: 0x03, Priority: 2, MaxSendBytes: 4}, - {ID: 0x04, Priority: 1, MaxSendBytes: 4}, - {ID: 0x05, Priority: 1, MaxSendBytes: 4}, - {ID: 0x06, Priority: 1, MaxSendBytes: 4}, - } - - peerQueue := newWDRRScheduler(log.NewNopLogger(), NopMetrics(), chDescs, 0, 0, 500) - peerQueue.start() - - totalMsgs := make(map[ChannelID]int) - deliveredMsgs := make(map[ChannelID]int) - successRates := make(map[ChannelID]float64) - - for _, chDesc := range chDescs { - total := 1000 - totalMsgs[ChannelID(chDesc.ID)] = total - - go func(cID ChannelID, n int) { - for i := 0; i < n; i++ { - peerQueue.enqueue() <- Envelope{ - channelID: cID, - Message: &testMessage{Value: "foo"}, // 5 bytes - } - } - }(ChannelID(chDesc.ID), total) - } - - closer := tmsync.NewCloser() - - go func() { - timout := 20 * time.Second - ticker := time.NewTicker(timout) - defer ticker.Stop() - - for { - select { - case e := <-peerQueue.dequeue(): - deliveredMsgs[e.channelID]++ - ticker.Reset(timout) - - case <-ticker.C: - closer.Close() - } - } - }() - - // wait for dequeueing to complete - <-closer.Done() - - // close queue and wait for cleanup - peerQueue.close() - <-peerQueue.closed() - - for i, chDesc := range peerQueue.chDescs { - chID := ChannelID(chDesc.ID) - require.Zero(t, peerQueue.deficits[chID], "expected flow deficit to be zero") - require.Len(t, peerQueue.buffer[chID], 0, "expected flow queue to be empty") - - total := totalMsgs[chID] - delivered := deliveredMsgs[chID] - successRate := float64(delivered) / float64(total) - - successRates[chID] = successRate - - // Require some messages dropped. 
Note, the top weighted flows may not have - // any dropped if lower priority non-empty queues always exist. - if i > 2 { - require.Less(t, delivered, total, "expected some messages to be dropped") - require.Less(t, successRate, 1.0, "expected a success rate below 100%") - } - } - - require.Zero(t, peerQueue.size, "expected scheduler size to be zero") - - // require channel 0x01 to have the highest success rate due to its weight - ch01Rate := successRates[ChannelID(chDescs[0].ID)] - for i := 1; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch01Rate, successRates[ChannelID(chDescs[i].ID)]) - } - - // require channel 0x02 to have the 2nd highest success rate due to its weight - ch02Rate := successRates[ChannelID(chDescs[1].ID)] - for i := 2; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch02Rate, successRates[ChannelID(chDescs[i].ID)]) - } - - // require channel 0x03 to have the 3rd highest success rate due to its weight - ch03Rate := successRates[ChannelID(chDescs[2].ID)] - for i := 3; i < len(chDescs); i++ { - require.GreaterOrEqual(t, ch03Rate, successRates[ChannelID(chDescs[i].ID)]) - } -} diff --git a/proxy/app_conn.go b/internal/proxy/app_conn.go similarity index 58% rename from proxy/app_conn.go rename to internal/proxy/app_conn.go index 8eb90daf3..803875646 100644 --- a/proxy/app_conn.go +++ b/internal/proxy/app_conn.go @@ -2,36 +2,38 @@ package proxy import ( "context" + "time" - abcicli "github.com/tendermint/tendermint/abci/client" + "github.com/go-kit/kit/metrics" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/types" ) -//go:generate ../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot +//go:generate ../../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot //---------------------------------------------------------------------------------------- // Enforce which abci msgs can be sent on a connection at the type level type AppConnConsensus interface { - SetResponseCallback(abcicli.Callback) + SetResponseCallback(abciclient.Callback) Error() error InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abcicli.ReqRes, error) + DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abciclient.ReqRes, error) EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) CommitSync(context.Context) (*types.ResponseCommit, error) } type AppConnMempool interface { - SetResponseCallback(abcicli.Callback) + SetResponseCallback(abciclient.Callback) Error() error - CheckTxAsync(context.Context, types.RequestCheckTx) (*abcicli.ReqRes, error) + CheckTxAsync(context.Context, types.RequestCheckTx) (*abciclient.ReqRes, error) CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) - FlushAsync(context.Context) (*abcicli.ReqRes, error) + FlushAsync(context.Context) (*abciclient.ReqRes, error) FlushSync(context.Context) error } @@ -53,19 +55,21 @@ type AppConnSnapshot interface { } //----------------------------------------------------------------------------------------- -// Implements AppConnConsensus (subset of abcicli.Client) +// Implements AppConnConsensus (subset of abciclient.Client) type appConnConsensus struct { - appConn abcicli.Client + metrics *Metrics + appConn abciclient.Client } -func NewAppConnConsensus(appConn 
abcicli.Client) AppConnConsensus { +func NewAppConnConsensus(appConn abciclient.Client, metrics *Metrics) AppConnConsensus { return &appConnConsensus{ + metrics: metrics, appConn: appConn, } } -func (app *appConnConsensus) SetResponseCallback(cb abcicli.Callback) { +func (app *appConnConsensus) SetResponseCallback(cb abciclient.Callback) { app.appConn.SetResponseCallback(cb) } @@ -77,6 +81,7 @@ func (app *appConnConsensus) InitChainSync( ctx context.Context, req types.RequestInitChain, ) (*types.ResponseInitChain, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "init_chain", "type", "sync"))() return app.appConn.InitChainSync(ctx, req) } @@ -84,10 +89,15 @@ func (app *appConnConsensus) BeginBlockSync( ctx context.Context, req types.RequestBeginBlock, ) (*types.ResponseBeginBlock, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "begin_block", "type", "sync"))() return app.appConn.BeginBlockSync(ctx, req) } -func (app *appConnConsensus) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (app *appConnConsensus) DeliverTxAsync( + ctx context.Context, + req types.RequestDeliverTx, +) (*abciclient.ReqRes, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "deliver_tx", "type", "async"))() return app.appConn.DeliverTxAsync(ctx, req) } @@ -95,27 +105,31 @@ func (app *appConnConsensus) EndBlockSync( ctx context.Context, req types.RequestEndBlock, ) (*types.ResponseEndBlock, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "end_block", "type", "sync"))() return app.appConn.EndBlockSync(ctx, req) } func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "commit", "type", "sync"))() return app.appConn.CommitSync(ctx) } //------------------------------------------------ -// Implements AppConnMempool (subset of abcicli.Client) +// Implements AppConnMempool (subset of abciclient.Client) type appConnMempool struct { - appConn abcicli.Client + metrics *Metrics + appConn abciclient.Client } -func NewAppConnMempool(appConn abcicli.Client) AppConnMempool { +func NewAppConnMempool(appConn abciclient.Client, metrics *Metrics) AppConnMempool { return &appConnMempool{ + metrics: metrics, appConn: appConn, } } -func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) { +func (app *appConnMempool) SetResponseCallback(cb abciclient.Callback) { app.appConn.SetResponseCallback(cb) } @@ -123,31 +137,37 @@ func (app *appConnMempool) Error() error { return app.appConn.Error() } -func (app *appConnMempool) FlushAsync(ctx context.Context) (*abcicli.ReqRes, error) { +func (app *appConnMempool) FlushAsync(ctx context.Context) (*abciclient.ReqRes, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "async"))() return app.appConn.FlushAsync(ctx) } func (app *appConnMempool) FlushSync(ctx context.Context) error { + defer addTimeSample(app.metrics.MethodTiming.With("method", "flush", "type", "sync"))() return app.appConn.FlushSync(ctx) } -func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abciclient.ReqRes, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "async"))() return app.appConn.CheckTxAsync(ctx, req) } func (app *appConnMempool) CheckTxSync(ctx
context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "check_tx", "type", "sync"))() return app.appConn.CheckTxSync(ctx, req) } //------------------------------------------------ -// Implements AppConnQuery (subset of abcicli.Client) +// Implements AppConnQuery (subset of abciclient.Client) type appConnQuery struct { - appConn abcicli.Client + metrics *Metrics + appConn abciclient.Client } -func NewAppConnQuery(appConn abcicli.Client) AppConnQuery { +func NewAppConnQuery(appConn abciclient.Client, metrics *Metrics) AppConnQuery { return &appConnQuery{ + metrics: metrics, appConn: appConn, } } @@ -157,26 +177,31 @@ func (app *appConnQuery) Error() error { } func (app *appConnQuery) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "echo", "type", "sync"))() return app.appConn.EchoSync(ctx, msg) } func (app *appConnQuery) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "info", "type", "sync"))() return app.appConn.InfoSync(ctx, req) } func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "query", "type", "sync"))() return app.appConn.QuerySync(ctx, reqQuery) } //------------------------------------------------ -// Implements AppConnSnapshot (subset of abcicli.Client) +// Implements AppConnSnapshot (subset of abciclient.Client) type appConnSnapshot struct { - appConn abcicli.Client + metrics *Metrics + appConn abciclient.Client } -func NewAppConnSnapshot(appConn abcicli.Client) AppConnSnapshot { +func NewAppConnSnapshot(appConn abciclient.Client, metrics *Metrics) AppConnSnapshot { return &appConnSnapshot{ + metrics: metrics, appConn: appConn, } } @@ -189,6 +214,7 @@ func (app *appConnSnapshot) ListSnapshotsSync( ctx context.Context, req types.RequestListSnapshots, ) (*types.ResponseListSnapshots, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "list_snapshots", "type", "sync"))() return app.appConn.ListSnapshotsSync(ctx, req) } @@ -196,17 +222,29 @@ func (app *appConnSnapshot) OfferSnapshotSync( ctx context.Context, req types.RequestOfferSnapshot, ) (*types.ResponseOfferSnapshot, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "offer_snapshot", "type", "sync"))() return app.appConn.OfferSnapshotSync(ctx, req) } func (app *appConnSnapshot) LoadSnapshotChunkSync( ctx context.Context, req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "load_snapshot_chunk", "type", "sync"))() return app.appConn.LoadSnapshotChunkSync(ctx, req) } func (app *appConnSnapshot) ApplySnapshotChunkSync( ctx context.Context, req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + defer addTimeSample(app.metrics.MethodTiming.With("method", "apply_snapshot_chunk", "type", "sync"))() return app.appConn.ApplySnapshotChunkSync(ctx, req) } + +// addTimeSample returns a function that, when called, adds an observation to m. +// The observation added to m is the number of seconds elapsed since addTimeSample +// was initially called. addTimeSample is meant to be called in a defer to calculate +// the amount of time a function takes to complete.
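// --- Sketch (pattern note, not part of the patch): the trailing () in
// `defer addTimeSample(...)()` is what makes the helper below work — the
// helper runs immediately, capturing the start time, and only its returned
// closure is deferred. startTimer is a stand-in with the same shape.
package timingsketch

import (
	"time"

	"github.com/go-kit/kit/metrics"
)

func startTimer(m metrics.Histogram) func() {
	start := time.Now() // captured when the defer statement executes
	return func() { m.Observe(time.Since(start).Seconds()) }
}

func timedSection(m metrics.Histogram) {
	defer startTimer(m)() // startTimer runs now; the closure runs on return
	time.Sleep(5 * time.Millisecond) // stand-in for real work
}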
+func addTimeSample(m metrics.Histogram) func() { + start := time.Now() + return func() { m.Observe(time.Since(start).Seconds()) } +} diff --git a/proxy/app_conn_test.go b/internal/proxy/app_conn_test.go similarity index 88% rename from proxy/app_conn_test.go rename to internal/proxy/app_conn_test.go index 458088635..f1ae7fe1a 100644 --- a/proxy/app_conn_test.go +++ b/internal/proxy/app_conn_test.go @@ -6,7 +6,7 @@ import ( "strings" "testing" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/abci/types" @@ -17,20 +17,20 @@ import ( //---------------------------------------- type appConnTestI interface { - EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) + EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) FlushSync(context.Context) error InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) } type appConnTest struct { - appConn abcicli.Client + appConn abciclient.Client } -func newAppConnTest(appConn abcicli.Client) appConnTestI { +func newAppConnTest(appConn abciclient.Client) appConnTestI { return &appConnTest{appConn} } -func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) { +func (app *appConnTest) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) { return app.appConn.EchoAsync(ctx, msg) } @@ -48,7 +48,7 @@ var SOCKET = "socket" func TestEcho(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) // Start server s := server.NewSocketServer(sockPath, kvstore.NewApplication()) @@ -63,7 +63,7 @@ func TestEcho(t *testing.T) { }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator() if err != nil { t.Fatalf("Error creating ABCI client: %v", err.Error()) } @@ -96,7 +96,7 @@ func TestEcho(t *testing.T) { func BenchmarkEcho(b *testing.B) { b.StopTimer() // Initialize sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) // Start server s := server.NewSocketServer(sockPath, kvstore.NewApplication()) @@ -111,7 +111,7 @@ func BenchmarkEcho(b *testing.B) { }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator() if err != nil { b.Fatalf("Error creating ABCI client: %v", err.Error()) } @@ -149,7 +149,7 @@ func BenchmarkEcho(b *testing.B) { func TestInfo(t *testing.T) { sockPath := fmt.Sprintf("unix:///tmp/echo_%v.sock", tmrand.Str(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) + clientCreator := abciclient.NewRemoteCreator(sockPath, SOCKET, true) // Start server s := server.NewSocketServer(sockPath, kvstore.NewApplication()) @@ -164,7 +164,7 @@ func TestInfo(t *testing.T) { }) // Start client - cli, err := clientCreator.NewABCIClient() + cli, err := clientCreator() if err != nil { t.Fatalf("Error creating ABCI client: %v", err.Error()) } diff --git a/internal/proxy/client.go b/internal/proxy/client.go new file mode 100644 index 000000000..ddb9a928d --- /dev/null +++ b/internal/proxy/client.go @@ -0,0 +1,41 @@ +package proxy + +import ( + "io" + + abciclient 
"github.com/tendermint/tendermint/abci/client" + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/abci/types" + e2e "github.com/tendermint/tendermint/test/e2e/app" +) + +// DefaultClientCreator returns a default ClientCreator, which will create a +// local client if addr is one of: 'kvstore', +// 'persistent_kvstore', 'e2e', or 'noop', otherwise - a remote client. +// +// The Closer is a noop except for persistent_kvstore applications, +// which will clean up the store. +func DefaultClientCreator(addr, transport, dbDir string) (abciclient.Creator, io.Closer) { + switch addr { + case "kvstore": + return abciclient.NewLocalCreator(kvstore.NewApplication()), noopCloser{} + case "persistent_kvstore": + app := kvstore.NewPersistentKVStoreApplication(dbDir) + return abciclient.NewLocalCreator(app), app + case "e2e": + app, err := e2e.NewApplication(e2e.DefaultConfig(dbDir)) + if err != nil { + panic(err) + } + return abciclient.NewLocalCreator(app), noopCloser{} + case "noop": + return abciclient.NewLocalCreator(types.NewBaseApplication()), noopCloser{} + default: + mustConnect := false // loop retrying + return abciclient.NewRemoteCreator(addr, transport, mustConnect), noopCloser{} + } +} + +type noopCloser struct{} + +func (noopCloser) Close() error { return nil } diff --git a/internal/proxy/metrics.go b/internal/proxy/metrics.go new file mode 100644 index 000000000..99bd7d7b0 --- /dev/null +++ b/internal/proxy/metrics.go @@ -0,0 +1,47 @@ +package proxy + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +const ( + // MetricsSubsystem is a subsystem shared by all metrics exposed by this + // package. + MetricsSubsystem = "abci_connection" +) + +// Metrics contains the prometheus metrics exposed by the proxy package. +type Metrics struct { + MethodTiming metrics.Histogram +} + +// PrometheusMetrics constructs a Metrics instance that collects metrics samples. +// The resulting metrics will be prefixed with namespace and labeled with the +// defaultLabelsAndValues. defaultLabelsAndValues must be a list of string pairs +// where the first of each pair is the label and the second is the value. +func PrometheusMetrics(namespace string, defaultLabelsAndValues ...string) *Metrics { + defaultLabels := []string{} + for i := 0; i < len(defaultLabelsAndValues); i += 2 { + defaultLabels = append(defaultLabels, defaultLabelsAndValues[i]) + } + return &Metrics{ + MethodTiming: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "method_timing", + Help: "ABCI Method Timing", + Buckets: []float64{.0001, .0004, .002, .009, .02, .1, .65, 2, 6, 25}, + }, append(defaultLabels, []string{"method", "type"}...)).With(defaultLabelsAndValues...), + } +} + +// NopMetrics constructs a Metrics instance that discards all samples and is suitable +// for testing. 
+func NopMetrics() *Metrics { + return &Metrics{ + MethodTiming: discard.NewHistogram(), + } +} diff --git a/proxy/mocks/app_conn_consensus.go b/internal/proxy/mocks/app_conn_consensus.go similarity index 92% rename from proxy/mocks/app_conn_consensus.go rename to internal/proxy/mocks/app_conn_consensus.go index 03207706e..fa93b0931 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/internal/proxy/mocks/app_conn_consensus.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" mock "github.com/stretchr/testify/mock" @@ -64,15 +64,15 @@ func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseComm } // DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -147,6 +147,6 @@ func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.Request } // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *AppConnConsensus) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } diff --git a/proxy/mocks/app_conn_mempool.go b/internal/proxy/mocks/app_conn_mempool.go similarity index 83% rename from proxy/mocks/app_conn_mempool.go rename to internal/proxy/mocks/app_conn_mempool.go index 2505160d6..5429d8f90 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/internal/proxy/mocks/app_conn_mempool.go @@ -5,7 +5,7 @@ package mocks import ( context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" mock "github.com/stretchr/testify/mock" @@ -18,15 +18,15 @@ type AppConnMempool struct { } // CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abciclient.ReqRes, error) { ret := _m.Called(_a0, _a1) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abciclient.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -78,15 +78,15 @@ func (_m *AppConnMempool) Error() error { } // FlushAsync provides a mock function with given fields: _a0 -func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) { +func (_m *AppConnMempool) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) { ret := _m.Called(_a0) - var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context) *abcicli.ReqRes); ok { + var r0 *abciclient.ReqRes + if rf, ok 
:= ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*abcicli.ReqRes) + r0 = ret.Get(0).(*abciclient.ReqRes) } } @@ -115,6 +115,6 @@ func (_m *AppConnMempool) FlushSync(_a0 context.Context) error { } // SetResponseCallback provides a mock function with given fields: _a0 -func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { +func (_m *AppConnMempool) SetResponseCallback(_a0 abciclient.Callback) { _m.Called(_a0) } diff --git a/proxy/mocks/app_conn_query.go b/internal/proxy/mocks/app_conn_query.go similarity index 100% rename from proxy/mocks/app_conn_query.go rename to internal/proxy/mocks/app_conn_query.go diff --git a/proxy/mocks/app_conn_snapshot.go b/internal/proxy/mocks/app_conn_snapshot.go similarity index 100% rename from proxy/mocks/app_conn_snapshot.go rename to internal/proxy/mocks/app_conn_snapshot.go diff --git a/proxy/multi_app_conn.go b/internal/proxy/multi_app_conn.go similarity index 84% rename from proxy/multi_app_conn.go rename to internal/proxy/multi_app_conn.go index 369b685ba..0bcc64af6 100644 --- a/proxy/multi_app_conn.go +++ b/internal/proxy/multi_app_conn.go @@ -5,7 +5,7 @@ import ( "os" "syscall" - abcicli "github.com/tendermint/tendermint/abci/client" + abciclient "github.com/tendermint/tendermint/abci/client" tmlog "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" ) @@ -33,8 +33,8 @@ type AppConns interface { } // NewAppConns calls NewMultiAppConn. -func NewAppConns(clientCreator ClientCreator) AppConns { - return NewMultiAppConn(clientCreator) +func NewAppConns(clientCreator abciclient.Creator, metrics *Metrics) AppConns { + return NewMultiAppConn(clientCreator, metrics) } // multiAppConn implements AppConns. @@ -45,22 +45,24 @@ func NewAppConns(clientCreator ClientCreator) AppConns { type multiAppConn struct { service.BaseService + metrics *Metrics consensusConn AppConnConsensus mempoolConn AppConnMempool queryConn AppConnQuery snapshotConn AppConnSnapshot - consensusConnClient abcicli.Client - mempoolConnClient abcicli.Client - queryConnClient abcicli.Client - snapshotConnClient abcicli.Client + consensusConnClient abciclient.Client + mempoolConnClient abciclient.Client + queryConnClient abciclient.Client + snapshotConnClient abciclient.Client - clientCreator ClientCreator + clientCreator abciclient.Creator } // NewMultiAppConn makes all necessary abci connections to the application. 
-func NewMultiAppConn(clientCreator ClientCreator) AppConns { +func NewMultiAppConn(clientCreator abciclient.Creator, metrics *Metrics) AppConns { multiAppConn := &multiAppConn{ + metrics: metrics, clientCreator: clientCreator, } multiAppConn.BaseService = *service.NewBaseService(nil, "multiAppConn", multiAppConn) @@ -89,7 +91,7 @@ func (app *multiAppConn) OnStart() error { return err } app.queryConnClient = c - app.queryConn = NewAppConnQuery(c) + app.queryConn = NewAppConnQuery(c, app.metrics) c, err = app.abciClientFor(connSnapshot) if err != nil { @@ -97,7 +99,7 @@ func (app *multiAppConn) OnStart() error { return err } app.snapshotConnClient = c - app.snapshotConn = NewAppConnSnapshot(c) + app.snapshotConn = NewAppConnSnapshot(c, app.metrics) c, err = app.abciClientFor(connMempool) if err != nil { @@ -105,7 +107,7 @@ func (app *multiAppConn) OnStart() error { return err } app.mempoolConnClient = c - app.mempoolConn = NewAppConnMempool(c) + app.mempoolConn = NewAppConnMempool(c, app.metrics) c, err = app.abciClientFor(connConsensus) if err != nil { @@ -113,7 +115,7 @@ func (app *multiAppConn) OnStart() error { return err } app.consensusConnClient = c - app.consensusConn = NewAppConnConsensus(c) + app.consensusConn = NewAppConnConsensus(c, app.metrics) // Kill Tendermint if the ABCI application crashes. go app.killTMOnClientError() @@ -178,8 +180,8 @@ func (app *multiAppConn) stopAllClients() { } } -func (app *multiAppConn) abciClientFor(conn string) (abcicli.Client, error) { - c, err := app.clientCreator.NewABCIClient() +func (app *multiAppConn) abciClientFor(conn string) (abciclient.Client, error) { + c, err := app.clientCreator() if err != nil { return nil, fmt.Errorf("error creating ABCI client (%s connection): %w", conn, err) } diff --git a/proxy/multi_app_conn_test.go b/internal/proxy/multi_app_conn_test.go similarity index 79% rename from proxy/multi_app_conn_test.go rename to internal/proxy/multi_app_conn_test.go index 34b0d0830..25ed692ab 100644 --- a/proxy/multi_app_conn_test.go +++ b/internal/proxy/multi_app_conn_test.go @@ -8,27 +8,30 @@ import ( "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + abciclient "github.com/tendermint/tendermint/abci/client" abcimocks "github.com/tendermint/tendermint/abci/client/mocks" - "github.com/tendermint/tendermint/proxy/mocks" ) func TestAppConns_Start_Stop(t *testing.T) { quitCh := make(<-chan struct{}) - clientCreatorMock := &mocks.ClientCreator{} - clientMock := &abcimocks.Client{} clientMock.On("SetLogger", mock.Anything).Return().Times(4) clientMock.On("Start").Return(nil).Times(4) clientMock.On("Stop").Return(nil).Times(4) clientMock.On("Quit").Return(quitCh).Times(4) - clientCreatorMock.On("NewABCIClient").Return(clientMock, nil).Times(4) + creatorCallCount := 0 + creator := func() (abciclient.Client, error) { + creatorCallCount++ + return clientMock, nil + } - appConns := NewAppConns(clientCreatorMock) + appConns := NewAppConns(creator, NopMetrics()) err := appConns.Start() require.NoError(t, err) @@ -39,6 +42,7 @@ func TestAppConns_Start_Stop(t *testing.T) { require.NoError(t, err) clientMock.AssertExpectations(t) + assert.Equal(t, 4, creatorCallCount) } // Upon failure, we call tmos.Kill @@ -56,8 +60,6 @@ func TestAppConns_Failure(t *testing.T) { var recvQuitCh <-chan struct{} // nolint:gosimple recvQuitCh = quitCh - clientCreatorMock := &mocks.ClientCreator{} - clientMock := &abcimocks.Client{} clientMock.On("SetLogger", mock.Anything).Return() 
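// --- Sketch (hypothetical helper): abciclient.Creator is now a bare
// function type, so tests stub it with closures; wrapping one to count
// invocations makes it visible that multiAppConn dials one client per
// connection (query, snapshot, mempool, consensus — four in total).
package creatorsketch

import (
	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

func countingCreator(inner abciclient.Creator, calls *int) abciclient.Creator {
	return func() (abciclient.Client, error) {
		*calls++
		return inner()
	}
}

func example() (abciclient.Creator, *int) {
	calls := new(int)
	inner := abciclient.NewLocalCreator(types.NewBaseApplication())
	return countingCreator(inner, calls), calls
}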
clientMock.On("Start").Return(nil) @@ -66,9 +68,11 @@ func TestAppConns_Failure(t *testing.T) { clientMock.On("Quit").Return(recvQuitCh) clientMock.On("Error").Return(errors.New("EOF")).Once() - clientCreatorMock.On("NewABCIClient").Return(clientMock, nil) + creator := func() (abciclient.Client, error) { + return clientMock, nil + } - appConns := NewAppConns(clientCreatorMock) + appConns := NewAppConns(creator, NopMetrics()) err := appConns.Start() require.NoError(t, err) diff --git a/proxy/version.go b/internal/proxy/version.go similarity index 100% rename from proxy/version.go rename to internal/proxy/version.go diff --git a/rpc/core/CONTRIBUTING.md b/internal/rpc/core/CONTRIBUTING.md similarity index 100% rename from rpc/core/CONTRIBUTING.md rename to internal/rpc/core/CONTRIBUTING.md diff --git a/rpc/core/README.md b/internal/rpc/core/README.md similarity index 100% rename from rpc/core/README.md rename to internal/rpc/core/README.md diff --git a/rpc/core/abci.go b/internal/rpc/core/abci.go similarity index 67% rename from rpc/core/abci.go rename to internal/rpc/core/abci.go index 613eaec8b..06c033050 100644 --- a/rpc/core/abci.go +++ b/internal/rpc/core/abci.go @@ -2,9 +2,9 @@ package core import ( abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/proxy" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -16,7 +16,7 @@ func (env *Environment) ABCIQuery( data bytes.HexBytes, height int64, prove bool, -) (*ctypes.ResultABCIQuery, error) { +) (*coretypes.ResultABCIQuery, error) { resQuery, err := env.ProxyAppQuery.QuerySync(ctx.Context(), abci.RequestQuery{ Path: path, Data: data, @@ -26,16 +26,17 @@ func (env *Environment) ABCIQuery( if err != nil { return nil, err } - env.Logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) - return &ctypes.ResultABCIQuery{Response: *resQuery}, nil + + return &coretypes.ResultABCIQuery{Response: *resQuery}, nil } // ABCIInfo gets some info about the application. 
// More: https://docs.tendermint.com/master/rpc/#/ABCI/abci_info -func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { +func (env *Environment) ABCIInfo(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) { resInfo, err := env.ProxyAppQuery.InfoSync(ctx.Context(), proxy.RequestInfo) if err != nil { return nil, err } - return &ctypes.ResultABCIInfo{Response: *resInfo}, nil + + return &coretypes.ResultABCIInfo{Response: *resInfo}, nil } diff --git a/rpc/core/blocks.go b/internal/rpc/core/blocks.go similarity index 80% rename from rpc/core/blocks.go rename to internal/rpc/core/blocks.go index 081276d0f..26472fab4 100644 --- a/rpc/core/blocks.go +++ b/internal/rpc/core/blocks.go @@ -4,11 +4,12 @@ import ( "fmt" "sort" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) @@ -24,7 +25,7 @@ import ( // More: https://docs.tendermint.com/master/rpc/#/Info/blockchain func (env *Environment) BlockchainInfo( ctx *rpctypes.Context, - minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { const limit int64 = 20 @@ -48,7 +49,7 @@ func (env *Environment) BlockchainInfo( } } - return &ctypes.ResultBlockchainInfo{ + return &coretypes.ResultBlockchainInfo{ LastHeight: env.BlockStore.Height(), BlockMetas: blockMetas}, nil } @@ -59,7 +60,7 @@ func (env *Environment) BlockchainInfo( func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // filter negatives if min < 0 || max < 0 { - return min, max, ctypes.ErrZeroOrNegativeHeight + return min, max, coretypes.ErrZeroOrNegativeHeight } // adjust for default values @@ -82,7 +83,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { if min > max { return min, max, fmt.Errorf("%w: min height %d can't be greater than max height %d", - ctypes.ErrInvalidRequest, min, max) + coretypes.ErrInvalidRequest, min, max) } return min, max, nil } @@ -90,7 +91,7 @@ func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // Block gets block at a given height. // If no height is provided, it will fetch the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/block -func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { +func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlock, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -98,29 +99,33 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes. blockMeta := env.BlockStore.LoadBlockMeta(height) if blockMeta == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil + return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } block := env.BlockStore.LoadBlock(height) - return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil + return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } // BlockByHash gets block by hash. 
// More: https://docs.tendermint.com/master/rpc/#/Info/block_by_hash -func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { + // N.B. The hash parameter is HexBytes so that the reflective parameter + // decoding logic in the HTTP service will correctly translate from JSON. + // See https://github.com/tendermint/tendermint/issues/6802 for context. + block := env.BlockStore.LoadBlockByHash(hash) if block == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil + return &coretypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil } // If block is not nil, then blockMeta can't be nil. blockMeta := env.BlockStore.LoadBlockMeta(block.Height) - return &ctypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil + return &coretypes.ResultBlock{BlockID: blockMeta.BlockID, Block: block}, nil } // Commit gets block commit at a given height. // If no height is provided, it will fetch the commit for the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/commit -func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { +func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultCommit, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -139,7 +144,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes // NOTE: we can't yet ensure atomicity of operations in asserting // whether this is the latest height and retrieving the seen commit if commit != nil && commit.Height == height { - return ctypes.NewResultCommit(&header, commit, false), nil + return coretypes.NewResultCommit(&header, commit, false), nil } } @@ -148,7 +153,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes if commit == nil { return nil, nil } - return ctypes.NewResultCommit(&header, commit, true), nil + return coretypes.NewResultCommit(&header, commit, true), nil } // BlockResults gets ABCIResults at a given height. 
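// --- Sketch (illustration for the HexBytes change above): a plain []byte
// decodes from JSON as base64, while bytes.HexBytes implements
// json.Unmarshaler for the hex strings the reflective HTTP parameter
// decoder actually receives.
package main

import (
	"encoding/json"
	"fmt"

	"github.com/tendermint/tendermint/libs/bytes"
)

func main() {
	var hash bytes.HexBytes
	if err := json.Unmarshal([]byte(`"00FF01"`), &hash); err != nil {
		panic(err)
	}
	fmt.Println(hash) // 00FF01
}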
@@ -158,7 +163,7 @@ func (env *Environment) Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes // Thus response.results.deliver_tx[5] is the results of executing // getBlock(h).Txs[5] // More: https://docs.tendermint.com/master/rpc/#/Info/block_results -func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { +func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*coretypes.ResultBlockResults, error) { height, err := env.getHeight(env.BlockStore.Height(), heightPtr) if err != nil { return nil, err @@ -174,7 +179,7 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (* totalGasUsed += tx.GetGasUsed() } - return &ctypes.ResultBlockResults{ + return &coretypes.ResultBlockResults{ Height: height, TxsResults: results.DeliverTxs, TotalGasUsed: totalGasUsed, @@ -192,7 +197,7 @@ func (env *Environment) BlockSearch( query string, pagePtr, perPagePtr *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) { +) (*coretypes.ResultBlockSearch, error) { if !indexer.KVSinkEnabled(env.EventSinks) { return nil, fmt.Errorf("block searching is disabled due to no kvEventSink") @@ -224,7 +229,7 @@ func (env *Environment) BlockSearch( sort.Slice(results, func(i, j int) bool { return results[i] < results[j] }) default: - return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", ctypes.ErrInvalidRequest) + return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest) } // paginate results @@ -239,13 +244,13 @@ func (env *Environment) BlockSearch( skipCount := validateSkipCount(page, perPage) pageSize := tmmath.MinInt(perPage, totalCount-skipCount) - apiResults := make([]*ctypes.ResultBlock, 0, pageSize) + apiResults := make([]*coretypes.ResultBlock, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { block := env.BlockStore.LoadBlock(results[i]) if block != nil { blockMeta := env.BlockStore.LoadBlockMeta(block.Height) if blockMeta != nil { - apiResults = append(apiResults, &ctypes.ResultBlock{ + apiResults = append(apiResults, &coretypes.ResultBlock{ Block: block, BlockID: blockMeta.BlockID, }) @@ -253,5 +258,5 @@ func (env *Environment) BlockSearch( } } - return &ctypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil + return &coretypes.ResultBlockSearch{Blocks: apiResults, TotalCount: totalCount}, nil } diff --git a/rpc/core/blocks_test.go b/internal/rpc/core/blocks_test.go similarity index 95% rename from rpc/core/blocks_test.go rename to internal/rpc/core/blocks_test.go index 29db2f094..68237bc0b 100644 --- a/rpc/core/blocks_test.go +++ b/internal/rpc/core/blocks_test.go @@ -10,10 +10,10 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + sm "github.com/tendermint/tendermint/internal/state" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -89,12 +89,12 @@ func TestBlockResults(t *testing.T) { testCases := []struct { height int64 wantErr bool - wantRes *ctypes.ResultBlockResults + wantRes *coretypes.ResultBlockResults }{ {-1, true, nil}, {0, true, nil}, {101, true, nil}, - {100, false, &ctypes.ResultBlockResults{ + {100, false, &coretypes.ResultBlockResults{ 
Height: 100, TxsResults: results.DeliverTxs, TotalGasUsed: 15, diff --git a/rpc/core/consensus.go b/internal/rpc/core/consensus.go similarity index 74% rename from rpc/core/consensus.go rename to internal/rpc/core/consensus.go index 1767c4b35..d17796fff 100644 --- a/rpc/core/consensus.go +++ b/internal/rpc/core/consensus.go @@ -1,11 +1,9 @@ package core import ( - cm "github.com/tendermint/tendermint/internal/consensus" tmmath "github.com/tendermint/tendermint/libs/math" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // Validators gets the validator set at the given block height. @@ -18,7 +16,7 @@ import ( func (env *Environment) Validators( ctx *rpctypes.Context, heightPtr *int64, - pagePtr, perPagePtr *int) (*ctypes.ResultValidators, error) { + pagePtr, perPagePtr *int) (*coretypes.ResultValidators, error) { // The latest validator that we know is the NextValidator of the last block. height, err := env.getHeight(env.latestUncommittedHeight(), heightPtr) @@ -42,7 +40,7 @@ func (env *Environment) Validators( v := validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - return &ctypes.ResultValidators{ + return &coretypes.ResultValidators{ BlockHeight: height, Validators: v, Count: len(v), @@ -52,32 +50,40 @@ func (env *Environment) Validators( // DumpConsensusState dumps consensus state. // UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/dump_consensus_state -func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { +func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) { // Get Peer consensus states. - peers := env.P2PPeers.Peers().List() - peerStates := make([]ctypes.PeerStateInfo, len(peers)) - for i, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState) - if !ok { // peer does not have a state yet + + var peerStates []coretypes.PeerStateInfo + peers := env.PeerManager.Peers() + peerStates = make([]coretypes.PeerStateInfo, 0, len(peers)) + for _, pid := range peers { + peerState, ok := env.ConsensusReactor.GetPeerState(pid) + if !ok { continue } + peerStateJSON, err := peerState.ToJSON() if err != nil { return nil, err } - peerStates[i] = ctypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: peer.SocketAddr().String(), - // Peer consensus state. - PeerState: peerStateJSON, + + addr := env.PeerManager.Addresses(pid) + if len(addr) != 0 { + peerStates = append(peerStates, coretypes.PeerStateInfo{ + // Peer basic info. + NodeAddress: addr[0].String(), + // Peer consensus state. + PeerState: peerStateJSON, + }) } } + // Get self round state. roundState, err := env.ConsensusState.GetRoundStateJSON() if err != nil { return nil, err } - return &ctypes.ResultDumpConsensusState{ + return &coretypes.ResultDumpConsensusState{ RoundState: roundState, Peers: peerStates}, nil } @@ -85,10 +91,10 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul // ConsensusState returns a concise summary of the consensus state. 
// UNSTABLE // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_state -func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { +func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) { // Get self round state. bz, err := env.ConsensusState.GetRoundStateSimpleJSON() - return &ctypes.ResultConsensusState{RoundState: bz}, err + return &coretypes.ResultConsensusState{RoundState: bz}, err } // ConsensusParams gets the consensus parameters at the given block height. @@ -96,7 +102,7 @@ func (env *Environment) GetConsensusState(ctx *rpctypes.Context) (*ctypes.Result // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params func (env *Environment) ConsensusParams( ctx *rpctypes.Context, - heightPtr *int64) (*ctypes.ResultConsensusParams, error) { + heightPtr *int64) (*coretypes.ResultConsensusParams, error) { // The latest consensus params that we know is the consensus params after the // last block. @@ -110,7 +116,7 @@ func (env *Environment) ConsensusParams( return nil, err } - return &ctypes.ResultConsensusParams{ + return &coretypes.ResultConsensusParams{ BlockHeight: height, ConsensusParams: consensusParams}, nil } diff --git a/rpc/core/dev.go b/internal/rpc/core/dev.go similarity index 61% rename from rpc/core/dev.go rename to internal/rpc/core/dev.go index 0e365cdcc..21c5154ff 100644 --- a/rpc/core/dev.go +++ b/internal/rpc/core/dev.go @@ -1,12 +1,12 @@ package core import ( - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // UnsafeFlushMempool removes all transactions from the mempool. -func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*ctypes.ResultUnsafeFlushMempool, error) { +func (env *Environment) UnsafeFlushMempool(ctx *rpctypes.Context) (*coretypes.ResultUnsafeFlushMempool, error) { env.Mempool.Flush() - return &ctypes.ResultUnsafeFlushMempool{}, nil + return &coretypes.ResultUnsafeFlushMempool{}, nil } diff --git a/rpc/core/doc.go b/internal/rpc/core/doc.go similarity index 100% rename from rpc/core/doc.go rename to internal/rpc/core/doc.go diff --git a/rpc/core/doc_template.txt b/internal/rpc/core/doc_template.txt similarity index 100% rename from rpc/core/doc_template.txt rename to internal/rpc/core/doc_template.txt diff --git a/rpc/core/env.go b/internal/rpc/core/env.go similarity index 71% rename from rpc/core/env.go rename to internal/rpc/core/env.go index eb7232c01..f05c34f14 100644 --- a/rpc/core/env.go +++ b/internal/rpc/core/env.go @@ -5,17 +5,18 @@ import ( "fmt" "time" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/consensus" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/statesync" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/proxy" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - sm "github.com/tendermint/tendermint/state" - 
"github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -36,7 +37,7 @@ const ( //---------------------------------------------- // These interfaces are used by RPC and must be thread safe -type Consensus interface { +type consensusState interface { GetState() sm.State GetValidators() (int64, []*types.Validator) GetLastHeight() int64 @@ -50,12 +51,14 @@ type transport interface { NodeInfo() types.NodeInfo } -type peers interface { - AddPersistentPeers([]string) error - AddUnconditionalPeerIDs([]string) error - AddPrivatePeerIDs([]string) error - DialPeersAsync([]string) error - Peers() p2p.IPeerSet +type consensusReactor interface { + WaitSync() bool + GetPeerState(peerID types.NodeID) (*consensus.PeerState, bool) +} + +type peerManager interface { + Peers() []types.NodeID + Addresses(types.NodeID) []p2p.NodeAddress } //---------------------------------------------- @@ -67,25 +70,30 @@ type Environment struct { ProxyAppMempool proxy.AppConnMempool // interfaces defined in types and above - StateStore sm.Store - BlockStore sm.BlockStore - EvidencePool sm.EvidencePool - ConsensusState Consensus - P2PPeers peers - P2PTransport transport + StateStore sm.Store + BlockStore sm.BlockStore + EvidencePool sm.EvidencePool + ConsensusState consensusState + ConsensusReactor consensusReactor + + // Legacy p2p stack + P2PTransport transport + + // interfaces for new p2p interfaces + PeerManager peerManager // objects - PubKey crypto.PubKey - GenDoc *types.GenesisDoc // cache the genesis structure - EventSinks []indexer.EventSink - ConsensusReactor *consensus.Reactor - EventBus *types.EventBus // thread safe - Mempool mempl.Mempool - BlockSyncReactor consensus.BlockSyncReactor + PubKey crypto.PubKey + GenDoc *types.GenesisDoc // cache the genesis structure + EventSinks []indexer.EventSink + EventBus *types.EventBus // thread safe + Mempool mempool.Mempool + BlockSyncReactor consensus.BlockSyncReactor + StateSyncMetricer statesync.Metricer Logger log.Logger - Config cfg.RPCConfig + Config config.RPCConfig // cache of chunked genesis data. 
genChunks []string @@ -96,7 +104,7 @@ type Environment struct { func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { // this can only happen if we haven't first run validatePerPage if perPage < 1 { - panic(fmt.Errorf("%w (%d)", ctypes.ErrZeroOrNegativePerPage, perPage)) + panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage)) } if pagePtr == nil { // no page parameter @@ -109,7 +117,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { } page := *pagePtr if page <= 0 || page > pages { - return 1, fmt.Errorf("%w expected range: [1, %d], given %d", ctypes.ErrPageOutOfRange, pages, page) + return 1, fmt.Errorf("%w expected range: [1, %d], given %d", coretypes.ErrPageOutOfRange, pages, page) } return page, nil @@ -174,15 +182,15 @@ func (env *Environment) getHeight(latestHeight int64, heightPtr *int64) (int64, if heightPtr != nil { height := *heightPtr if height <= 0 { - return 0, fmt.Errorf("%w (requested height: %d)", ctypes.ErrZeroOrNegativeHeight, height) + return 0, fmt.Errorf("%w (requested height: %d)", coretypes.ErrZeroOrNegativeHeight, height) } if height > latestHeight { return 0, fmt.Errorf("%w (requested height: %d, blockchain height: %d)", - ctypes.ErrHeightExceedsChainHead, height, latestHeight) + coretypes.ErrHeightExceedsChainHead, height, latestHeight) } base := env.BlockStore.Base() if height < base { - return 0, fmt.Errorf("%w (requested height: %d, base height: %d)", ctypes.ErrHeightNotAvailable, height, base) + return 0, fmt.Errorf("%w (requested height: %d, base height: %d)", coretypes.ErrHeightNotAvailable, height, base) } return height, nil } diff --git a/rpc/core/env_test.go b/internal/rpc/core/env_test.go similarity index 100% rename from rpc/core/env_test.go rename to internal/rpc/core/env_test.go diff --git a/rpc/core/events.go b/internal/rpc/core/events.go similarity index 89% rename from rpc/core/events.go rename to internal/rpc/core/events.go index e56295c52..8632e00c1 100644 --- a/rpc/core/events.go +++ b/internal/rpc/core/events.go @@ -7,7 +7,7 @@ import ( tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) @@ -18,7 +18,7 @@ const ( // Subscribe for events via WebSocket. // More: https://docs.tendermint.com/master/rpc/#/Websocket/subscribe -func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { +func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) { addr := ctx.RemoteAddr() if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { @@ -49,7 +49,7 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes. select { case msg := <-sub.Out(): var ( - resultEvent = &ctypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} + resultEvent = &coretypes.ResultEvent{Query: query, Data: msg.Data(), Events: msg.Events()} resp = rpctypes.NewRPCSuccessResponse(subscriptionID, resultEvent) ) writeCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -80,12 +80,12 @@ func (env *Environment) Subscribe(ctx *rpctypes.Context, query string) (*ctypes. } }() - return &ctypes.ResultSubscribe{}, nil + return &coretypes.ResultSubscribe{}, nil } // Unsubscribe from events via WebSocket. 
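For intuition, the windowing arithmetic that validatePage and its helpers enforce can be shown standalone. The sketch below assumes validateSkipCount computes (page-1)*perPage, which is what its call sites in BlockSearch and TxSearch imply (its body is outside this hunk), and inlines tmmath.MinInt:

    package main

    import "fmt"

    func minInt(a, b int) int {
        if a < b {
            return a
        }
        return b
    }

    func main() {
        const perPage, totalCount = 30, 75 // pages = ceil(75/30) = 3

        for page := 1; page <= 3; page++ {
            skipCount := (page - 1) * perPage                 // assumed validateSkipCount result
            pageSize := minInt(perPage, totalCount-skipCount) // the last page may be short
            fmt.Printf("page %d -> results[%d:%d]\n", page, skipCount, skipCount+pageSize)
        }
        // page 1 -> results[0:30]
        // page 2 -> results[30:60]
        // page 3 -> results[60:75]
    }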
// More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe -func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { +func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) { args := tmpubsub.UnsubscribeArgs{Subscriber: ctx.RemoteAddr()} env.Logger.Info("Unsubscribe from query", "remote", args.Subscriber, "subscription", query) @@ -100,17 +100,17 @@ func (env *Environment) Unsubscribe(ctx *rpctypes.Context, query string) (*ctype if err != nil { return nil, err } - return &ctypes.ResultUnsubscribe{}, nil + return &coretypes.ResultUnsubscribe{}, nil } // UnsubscribeAll from all events via WebSocket. // More: https://docs.tendermint.com/master/rpc/#/Websocket/unsubscribe_all -func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { +func (env *Environment) UnsubscribeAll(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) { addr := ctx.RemoteAddr() env.Logger.Info("Unsubscribe from all", "remote", addr) err := env.EventBus.UnsubscribeAll(ctx.Context(), addr) if err != nil { return nil, err } - return &ctypes.ResultUnsubscribe{}, nil + return &coretypes.ResultUnsubscribe{}, nil } diff --git a/rpc/core/evidence.go b/internal/rpc/core/evidence.go similarity index 69% rename from rpc/core/evidence.go rename to internal/rpc/core/evidence.go index e071c5a7e..a7641b99d 100644 --- a/rpc/core/evidence.go +++ b/internal/rpc/core/evidence.go @@ -3,7 +3,7 @@ package core import ( "fmt" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -12,10 +12,10 @@ import ( // More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence func (env *Environment) BroadcastEvidence( ctx *rpctypes.Context, - ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { if ev == nil { - return nil, fmt.Errorf("%w: no evidence was provided", ctypes.ErrInvalidRequest) + return nil, fmt.Errorf("%w: no evidence was provided", coretypes.ErrInvalidRequest) } if err := ev.ValidateBasic(); err != nil { @@ -25,5 +25,5 @@ func (env *Environment) BroadcastEvidence( if err := env.EvidencePool.AddEvidence(ev); err != nil { return nil, fmt.Errorf("failed to add evidence: %w", err) } - return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil + return &coretypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil } diff --git a/rpc/core/health.go b/internal/rpc/core/health.go similarity index 59% rename from rpc/core/health.go rename to internal/rpc/core/health.go index 855911d83..fc355c7e7 100644 --- a/rpc/core/health.go +++ b/internal/rpc/core/health.go @@ -1,13 +1,13 @@ package core import ( - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Health gets node health. Returns empty result (200 OK) on success, no // response - in case of an error. 
// More: https://docs.tendermint.com/master/rpc/#/Info/health -func (env *Environment) Health(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { - return &ctypes.ResultHealth{}, nil +func (env *Environment) Health(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) { + return &coretypes.ResultHealth{}, nil } diff --git a/internal/rpc/core/mempool.go b/internal/rpc/core/mempool.go new file mode 100644 index 000000000..5b12a6c2b --- /dev/null +++ b/internal/rpc/core/mempool.go @@ -0,0 +1,157 @@ +package core + +import ( + "errors" + "fmt" + "math/rand" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/types" +) + +//----------------------------------------------------------------------------- +// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!) + +// BroadcastTxAsync returns right away, with no response. Does not wait for +// CheckTx nor DeliverTx results. +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async +func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempool.TxInfo{}) + if err != nil { + return nil, err + } + + return &coretypes.ResultBroadcastTx{Hash: tx.Hash()}, nil +} + +// BroadcastTxSync returns with the response from CheckTx. Does not wait for +// DeliverTx result. +// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync +func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { + resCh := make(chan *abci.Response, 1) + err := env.Mempool.CheckTx( + ctx.Context(), + tx, + func(res *abci.Response) { resCh <- res }, + mempool.TxInfo{}, + ) + if err != nil { + return nil, err + } + + res := <-resCh + r := res.GetCheckTx() + + return &coretypes.ResultBroadcastTx{ + Code: r.Code, + Data: r.Data, + Log: r.Log, + Codespace: r.Codespace, + MempoolError: r.MempoolError, + Hash: tx.Hash(), + }, nil +} + +// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. 
+// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit +func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { //nolint:lll + resCh := make(chan *abci.Response, 1) + err := env.Mempool.CheckTx( + ctx.Context(), + tx, + func(res *abci.Response) { resCh <- res }, + mempool.TxInfo{}, + ) + if err != nil { + return nil, err + } + + r := (<-resCh).GetCheckTx() + + if !indexer.KVSinkEnabled(env.EventSinks) { + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + Hash: tx.Hash(), + }, + errors.New("cannot confirm transaction because kvEventSink is not enabled") + } + + startAt := time.Now() + timer := time.NewTimer(0) + defer timer.Stop() + + count := 0 + for { + count++ + select { + case <-ctx.Context().Done(): + env.Logger.Error("Error on broadcastTxCommit", + "duration", time.Since(startAt), + "err", ctx.Context().Err()) + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + Hash: tx.Hash(), + }, fmt.Errorf("timeout waiting for commit of tx %s (%s)", + tx.Hash(), time.Since(startAt)) + case <-timer.C: + txres, err := env.Tx(ctx, tx.Hash(), false) + if err != nil { + jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second))) // nolint: gosec + backoff := 100 * time.Duration(count) * time.Millisecond + timer.Reset(jitter + backoff) + continue + } + + return &coretypes.ResultBroadcastTxCommit{ + CheckTx: *r, + DeliverTx: txres.TxResult, + Hash: tx.Hash(), + Height: txres.Height, + }, nil + } + } +} + +// UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) +// including their number. +// More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs +func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*coretypes.ResultUnconfirmedTxs, error) { + // reuse per_page validator + limit := env.validatePerPage(limitPtr) + + txs := env.Mempool.ReapMaxTxs(limit) + return &coretypes.ResultUnconfirmedTxs{ + Count: len(txs), + Total: env.Mempool.Size(), + TotalBytes: env.Mempool.SizeBytes(), + Txs: txs}, nil +} + +// NumUnconfirmedTxs gets the number of unconfirmed transactions. +// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs +func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) { + return &coretypes.ResultUnconfirmedTxs{ + Count: env.Mempool.Size(), + Total: env.Mempool.Size(), + TotalBytes: env.Mempool.SizeBytes()}, nil +} + +// CheckTx checks the transaction without executing it. The transaction won't +// be added to the mempool either. +// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx +func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { + res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx}) + if err != nil { + return nil, err + } + return &coretypes.ResultCheckTx{ResponseCheckTx: *res}, nil +} + +// RemoveTx removes the transaction with the given key from the mempool. +func (env *Environment) RemoveTx(ctx *rpctypes.Context, txkey types.TxKey) error { + return env.Mempool.RemoveTxByKey(txkey) +} diff --git a/internal/rpc/core/net.go b/internal/rpc/core/net.go new file mode 100644 index 000000000..fdf4be69b --- /dev/null +++ b/internal/rpc/core/net.go @@ -0,0 +1,67 @@ +package core + +import ( + "errors" + "fmt" + + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" +) + +// NetInfo returns network info.
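The polling loop in BroadcastTxCommit above sleeps a linearly growing backoff plus random jitter between Tx lookups. A standalone sketch of the resulting wait schedule, reusing the exact constants from the retry arm of the select:

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    func main() {
        for count := 1; count <= 5; count++ {
            // Identical formula to the retry arm above: a 100ms floor plus
            // up to 1s of jitter, plus 100ms of backoff per attempt.
            jitter := 100*time.Millisecond + time.Duration(rand.Int63n(int64(time.Second)))
            backoff := 100 * time.Duration(count) * time.Millisecond
            fmt.Printf("attempt %d: sleep %v before retrying Tx lookup\n", count, jitter+backoff)
        }
    }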
+// More: https://docs.tendermint.com/master/rpc/#/Info/net_info +func (env *Environment) NetInfo(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { + peerList := env.PeerManager.Peers() + + peers := make([]coretypes.Peer, 0, len(peerList)) + for _, peer := range peerList { + addrs := env.PeerManager.Addresses(peer) + if len(addrs) == 0 { + continue + } + + peers = append(peers, coretypes.Peer{ + ID: peer, + URL: addrs[0].String(), + }) + } + + return &coretypes.ResultNetInfo{ + Listening: env.P2PTransport.IsListening(), + Listeners: env.P2PTransport.Listeners(), + NPeers: len(peers), + Peers: peers, + }, nil +} + +// Genesis returns the genesis file. +// More: https://docs.tendermint.com/master/rpc/#/Info/genesis +func (env *Environment) Genesis(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { + if len(env.genChunks) > 1 { + return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") + } + + return &coretypes.ResultGenesis{Genesis: env.GenDoc}, nil +} + +// GenesisChunked returns the genesis document split into chunks; the chunk +// parameter is a zero-based index into the cached chunk list. +func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) { + if env.genChunks == nil { + return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") + } + + if len(env.genChunks) == 0 { + return nil, fmt.Errorf("service configuration error, there are no chunks") + } + + id := int(chunk) + + if id > len(env.genChunks)-1 { + return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks), id) + } + + return &coretypes.ResultGenesisChunk{ + TotalChunks: len(env.genChunks), + ChunkNumber: id, + Data: env.genChunks[id], + }, nil +} diff --git a/rpc/core/routes.go b/internal/rpc/core/routes.go similarity index 93% rename from rpc/core/routes.go rename to internal/rpc/core/routes.go index 1eb50fe4e..fe99d2118 100644 --- a/rpc/core/routes.go +++ b/internal/rpc/core/routes.go @@ -28,6 +28,7 @@ func (env *Environment) GetRoutes() RoutesMap { "block_results": rpc.NewRPCFunc(env.BlockResults, "height", true), "commit": rpc.NewRPCFunc(env.Commit, "height", true), "check_tx": rpc.NewRPCFunc(env.CheckTx, "tx", true), + "remove_tx": rpc.NewRPCFunc(env.RemoveTx, "txkey", false), "tx": rpc.NewRPCFunc(env.Tx, "hash,prove", true), "tx_search": rpc.NewRPCFunc(env.TxSearch, "query,prove,page,per_page,order_by", false), "block_search": rpc.NewRPCFunc(env.BlockSearch, "query,page,per_page,order_by", false), @@ -55,7 +56,5 @@ func (env *Environment) GetRoutes() RoutesMap { // AddUnsafeRoutes adds unsafe routes.
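GenesisChunked above serves the cached genChunks by zero-based index, with TotalChunks reporting the count. A sketch of client-side reassembly follows; fetchChunk is a hypothetical stand-in for however the caller reaches the endpoint, the usual imports (strings, rpc/coretypes) are assumed, and whether Data needs further decoding (e.g. base64) depends on how genChunks is populated, which is outside this hunk:

    // reassembleGenesis fetches chunks 0..TotalChunks-1 in order and
    // concatenates their payloads. fetchChunk is a hypothetical callback,
    // e.g. a thin wrapper over the genesis_chunked RPC route.
    func reassembleGenesis(fetchChunk func(id uint) (*coretypes.ResultGenesisChunk, error)) (string, error) {
        var sb strings.Builder
        for id := uint(0); ; id++ {
            res, err := fetchChunk(id)
            if err != nil {
                return "", err
            }
            sb.WriteString(res.Data)
            if int(id) >= res.TotalChunks-1 { // ChunkNumber ranges over [0, TotalChunks)
                return sb.String(), nil
            }
        }
    }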
func (env *Environment) AddUnsafe(routes RoutesMap) { // control API - routes["dial_seeds"] = rpc.NewRPCFunc(env.UnsafeDialSeeds, "seeds", false) - routes["dial_peers"] = rpc.NewRPCFunc(env.UnsafeDialPeers, "peers,persistent,unconditional,private", false) routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(env.UnsafeFlushMempool, "", false) } diff --git a/rpc/core/status.go b/internal/rpc/core/status.go similarity index 77% rename from rpc/core/status.go rename to internal/rpc/core/status.go index 815ab37f5..b883c6dc2 100644 --- a/rpc/core/status.go +++ b/internal/rpc/core/status.go @@ -5,7 +5,7 @@ import ( "time" tmbytes "github.com/tendermint/tendermint/libs/bytes" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -13,7 +13,7 @@ import ( // Status returns Tendermint status including node info, pubkey, latest block // hash, app hash, block height, current max peer block height, and time. // More: https://docs.tendermint.com/master/rpc/#/Info/status -func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { +func (env *Environment) Status(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) { var ( earliestBlockHeight int64 earliestBlockHash tmbytes.HexBytes @@ -50,17 +50,18 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err if val := env.validatorAtHeight(env.latestUncommittedHeight()); val != nil { votingPower = val.VotingPower } - validatorInfo := ctypes.ValidatorInfo{} + validatorInfo := coretypes.ValidatorInfo{} if env.PubKey != nil { - validatorInfo = ctypes.ValidatorInfo{ + validatorInfo = coretypes.ValidatorInfo{ Address: env.PubKey.Address(), PubKey: env.PubKey, VotingPower: votingPower, } } - result := &ctypes.ResultStatus{ + + result := &coretypes.ResultStatus{ NodeInfo: env.P2PTransport.NodeInfo(), - SyncInfo: ctypes.SyncInfo{ + SyncInfo: coretypes.SyncInfo{ LatestBlockHash: latestBlockHash, LatestAppHash: latestAppHash, LatestBlockHeight: latestHeight, @@ -77,6 +78,16 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err ValidatorInfo: validatorInfo, } + if env.StateSyncMetricer != nil { + result.SyncInfo.TotalSnapshots = env.StateSyncMetricer.TotalSnapshots() + result.SyncInfo.ChunkProcessAvgTime = env.StateSyncMetricer.ChunkProcessAvgTime() + result.SyncInfo.SnapshotHeight = env.StateSyncMetricer.SnapshotHeight() + result.SyncInfo.SnapshotChunksCount = env.StateSyncMetricer.SnapshotChunksCount() + result.SyncInfo.SnapshotChunksTotal = env.StateSyncMetricer.SnapshotChunksTotal() + result.SyncInfo.BackFilledBlocks = env.StateSyncMetricer.BackFilledBlocks() + result.SyncInfo.BackFillBlocksTotal = env.StateSyncMetricer.BackFillBlocksTotal() + } + return result, nil } diff --git a/rpc/core/tx.go b/internal/rpc/core/tx.go similarity index 81% rename from rpc/core/tx.go rename to internal/rpc/core/tx.go index 1b3da3075..60c7519c0 100644 --- a/rpc/core/tx.go +++ b/internal/rpc/core/tx.go @@ -5,11 +5,12 @@ import ( "fmt" "sort" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - 
"github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) @@ -17,9 +18,13 @@ import ( // transaction is in the mempool, invalidated, or was not sent in the first // place. // More: https://docs.tendermint.com/master/rpc/#/Info/tx -func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (env *Environment) Tx(ctx *rpctypes.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { // if index is disabled, return error + // N.B. The hash parameter is HexBytes so that the reflective parameter + // decoding logic in the HTTP service will correctly translate from JSON. + // See https://github.com/tendermint/tendermint/issues/6802 for context. + if !indexer.KVSinkEnabled(env.EventSinks) { return nil, errors.New("transaction querying is disabled due to no kvEventSink") } @@ -40,7 +45,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*cty proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines } - return &ctypes.ResultTx{ + return &coretypes.ResultTx{ Hash: hash, Height: height, Index: index, @@ -63,7 +68,7 @@ func (env *Environment) TxSearch( prove bool, pagePtr, perPagePtr *int, orderBy string, -) (*ctypes.ResultTxSearch, error) { +) (*coretypes.ResultTxSearch, error) { if !indexer.KVSinkEnabled(env.EventSinks) { return nil, fmt.Errorf("transaction searching is disabled due to no kvEventSink") @@ -98,7 +103,7 @@ func (env *Environment) TxSearch( return results[i].Height < results[j].Height }) default: - return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", ctypes.ErrInvalidRequest) + return nil, fmt.Errorf("expected order_by to be either `asc` or `desc` or empty: %w", coretypes.ErrInvalidRequest) } // paginate results @@ -113,7 +118,7 @@ func (env *Environment) TxSearch( skipCount := validateSkipCount(page, perPage) pageSize := tmmath.MinInt(perPage, totalCount-skipCount) - apiResults := make([]*ctypes.ResultTx, 0, pageSize) + apiResults := make([]*coretypes.ResultTx, 0, pageSize) for i := skipCount; i < skipCount+pageSize; i++ { r := results[i] @@ -123,7 +128,7 @@ func (env *Environment) TxSearch( proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines } - apiResults = append(apiResults, &ctypes.ResultTx{ + apiResults = append(apiResults, &coretypes.ResultTx{ Hash: types.Tx(r.Tx).Hash(), Height: r.Height, Index: r.Index, @@ -133,7 +138,7 @@ func (env *Environment) TxSearch( }) } - return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil + return &coretypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil } } diff --git a/state/errors.go b/internal/state/errors.go similarity index 100% rename from state/errors.go rename to internal/state/errors.go diff --git a/state/execution.go b/internal/state/execution.go similarity index 98% rename from state/execution.go rename to internal/state/execution.go index 05d5bdd52..e3dc80dca 100644 --- a/state/execution.go +++ b/internal/state/execution.go @@ -7,12 +7,12 @@ import ( "time" abci "github.com/tendermint/tendermint/abci/types" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/fail" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/log" tmstate 
"github.com/tendermint/tendermint/proto/tendermint/state" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" ) @@ -37,7 +37,7 @@ type BlockExecutor struct { // manage the mempool lock during commit // and update both with block results after commit. - mempool mempl.Mempool + mempool mempool.Mempool evpool EvidencePool logger log.Logger @@ -61,7 +61,7 @@ func NewBlockExecutor( stateStore Store, logger log.Logger, proxyApp proxy.AppConnConsensus, - mempool mempl.Mempool, + pool mempool.Mempool, evpool EvidencePool, blockStore BlockStore, options ...BlockExecutorOption, @@ -70,7 +70,7 @@ func NewBlockExecutor( store: stateStore, proxyApp: proxyApp, eventBus: types.NopEventBus{}, - mempool: mempool, + mempool: pool, evpool: evpool, logger: logger, metrics: NopMetrics(), @@ -424,7 +424,7 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, } // Check if validator's pubkey matches an ABCI type in the consensus params - pk, err := cryptoenc.PubKeyFromProto(valUpdate.PubKey) + pk, err := encoding.PubKeyFromProto(valUpdate.PubKey) if err != nil { return err } diff --git a/state/execution_test.go b/internal/state/execution_test.go similarity index 92% rename from state/execution_test.go rename to internal/state/execution_test.go index 8e0ec563a..a66b677f9 100644 --- a/state/execution_test.go +++ b/internal/state/execution_test.go @@ -8,23 +8,24 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" mmock "github.com/tendermint/tendermint/internal/mempool/mock" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + sf "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/mocks" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) var ( @@ -34,8 +35,8 @@ var ( func TestApplyBlock(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) + cc := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -59,8 +60,8 @@ func TestApplyBlock(t *testing.T) { // TestBeginBlockValidators ensures we send absent validators list. 
func TestBeginBlockValidators(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) + cc := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // no need to check error again @@ -122,8 +123,8 @@ func TestBeginBlockValidators(t *testing.T) { // TestBeginBlockByzantineValidators ensures we send byzantine validators list. func TestBeginBlockByzantineValidators(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) + cc := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -217,9 +218,9 @@ func TestBeginBlockByzantineValidators(t *testing.T) { func TestValidateValidatorUpdates(t *testing.T) { pubkey1 := ed25519.GenPrivKey().PubKey() pubkey2 := ed25519.GenPrivKey().PubKey() - pk1, err := cryptoenc.PubKeyToProto(pubkey1) + pk1, err := encoding.PubKeyToProto(pubkey1) assert.NoError(t, err) - pk2, err := cryptoenc.PubKeyToProto(pubkey2) + pk2, err := encoding.PubKeyToProto(pubkey2) assert.NoError(t, err) defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}} @@ -277,9 +278,9 @@ func TestUpdateValidators(t *testing.T) { pubkey2 := ed25519.GenPrivKey().PubKey() val2 := types.NewValidator(pubkey2, 20) - pk, err := cryptoenc.PubKeyToProto(pubkey1) + pk, err := encoding.PubKeyToProto(pubkey1) require.NoError(t, err) - pk2, err := cryptoenc.PubKeyToProto(pubkey2) + pk2, err := encoding.PubKeyToProto(pubkey2) require.NoError(t, err) testCases := []struct { @@ -347,8 +348,8 @@ func TestUpdateValidators(t *testing.T) { // TestEndBlockValidatorUpdates ensures we update validator set and send an event. 
func TestEndBlockValidatorUpdates(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) + cc := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -384,7 +385,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} pubkey := ed25519.GenPrivKey().PubKey() - pk, err := cryptoenc.PubKeyToProto(pubkey) + pk, err := encoding.PubKeyToProto(pubkey) require.NoError(t, err) app.ValidatorUpdates = []abci.ValidatorUpdate{ {PubKey: pk, Power: 10}, @@ -420,8 +421,8 @@ func TestEndBlockValidatorUpdates(t *testing.T) { // would result in empty set causes no panic, an error is raised and NextValidators is not updated func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc) + cc := abciclient.NewLocalCreator(app) + proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics()) err := proxyApp.Start() require.Nil(t, err) defer proxyApp.Stop() //nolint:errcheck // ignore for tests @@ -441,7 +442,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { block := sf.MakeBlock(state, 1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} - vp, err := cryptoenc.PubKeyToProto(state.Validators.Validators[0].PubKey) + vp, err := encoding.PubKeyToProto(state.Validators.Validators[0].PubKey) require.NoError(t, err) // Remove the only validator app.ValidatorUpdates = []abci.ValidatorUpdate{ diff --git a/state/export_test.go b/internal/state/export_test.go similarity index 100% rename from state/export_test.go rename to internal/state/export_test.go diff --git a/state/helpers_test.go b/internal/state/helpers_test.go similarity index 94% rename from state/helpers_test.go rename to internal/state/helpers_test.go index 6d575e147..0cedebb00 100644 --- a/state/helpers_test.go +++ b/internal/state/helpers_test.go @@ -7,18 +7,19 @@ import ( dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + sf "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" ) @@ -29,8 +30,8 @@ type paramsChangeTestCase struct { func newTestApp() proxy.AppConns { app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - return proxy.NewAppConns(cc) + cc := abciclient.NewLocalCreator(app) + return proxy.NewAppConns(cc, proxy.NopMetrics()) } func makeAndCommitGoodBlock( @@ -147,11 +148,11 @@ 
func makeHeaderPartsResponsesValPubKeyChange( // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { - vPbPk, err := cryptoenc.PubKeyToProto(val.PubKey) + vPbPk, err := encoding.PubKeyToProto(val.PubKey) if err != nil { panic(err) } - pbPk, err := cryptoenc.PubKeyToProto(pubkey) + pbPk, err := encoding.PubKeyToProto(pubkey) if err != nil { panic(err) } @@ -180,7 +181,7 @@ func makeHeaderPartsResponsesValPowerChange( // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) if val.VotingPower != power { - vPbPk, err := cryptoenc.PubKeyToProto(val.PubKey) + vPbPk, err := encoding.PubKeyToProto(val.PubKey) if err != nil { panic(err) } diff --git a/state/indexer/block/kv/kv.go b/internal/state/indexer/block/kv/kv.go similarity index 99% rename from state/indexer/block/kv/kv.go rename to internal/state/indexer/block/kv/kv.go index bc90eadf5..d52f06c96 100644 --- a/state/indexer/block/kv/kv.go +++ b/internal/state/indexer/block/kv/kv.go @@ -12,8 +12,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/block/kv/kv_test.go b/internal/state/indexer/block/kv/kv_test.go similarity index 94% rename from state/indexer/block/kv/kv_test.go rename to internal/state/indexer/block/kv/kv_test.go index 2a342f870..024df332c 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/internal/state/indexer/block/kv/kv_test.go @@ -6,15 +6,16 @@ import ( "testing" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" + blockidxkv "github.com/tendermint/tendermint/internal/state/indexer/block/kv" "github.com/tendermint/tendermint/libs/pubsub/query" - blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) func TestBlockIndexer(t *testing.T) { - store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("block_events")) indexer := blockidxkv.New(store) require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ diff --git a/state/indexer/block/kv/util.go b/internal/state/indexer/block/kv/util.go similarity index 100% rename from state/indexer/block/kv/util.go rename to internal/state/indexer/block/kv/util.go diff --git a/state/indexer/block/null/null.go b/internal/state/indexer/block/null/null.go similarity index 91% rename from state/indexer/block/null/null.go rename to internal/state/indexer/block/null/null.go index d36d8680e..9b28d93bb 100644 --- a/state/indexer/block/null/null.go +++ b/internal/state/indexer/block/null/null.go @@ -4,8 +4,8 @@ import ( "context" "errors" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/doc.go b/internal/state/indexer/doc.go new file mode 100644 index 000000000..61adbabac --- /dev/null +++ b/internal/state/indexer/doc.go @@ -0,0 +1,72 @@ +/* +Package indexer defines Tendermint's block and transaction event indexing logic. 
+ +Tendermint supports two primary means of block and transaction event indexing: + +1. A key-value sink via an embedded database with a proprietary query language. +2. A Postgres-based sink. + +An ABCI application can emit events during block and transaction execution in the form + + <event-type>.<attribute-key>=<attribute-value> + +for example "transfer.amount=10000". + +An operator can enable one or both of the supported indexing sinks via the +'tx-index.indexer' Tendermint configuration. + +Example: + + [tx-index] + indexer = ["kv", "psql"] + +If an operator wants to completely disable indexing, they may simply provide +the "null" sink option in the configuration. All other sinks will be ignored if +"null" is provided. + +If indexing is enabled, the indexer.Service will iterate over all enabled sinks +and invoke block and transaction indexing via the appropriate IndexBlockEvents +and IndexTxEvents methods. + +Note that the "kv" sink is considered deprecated and its query functionality is +limited, though it does allow users to query for block and transaction events +directly against Tendermint's RPC. Operators are instead encouraged to use the +"psql" indexing sink when more complex queries are required, and for reliability, +as PostgreSQL can scale. + +Prior to starting Tendermint with the "psql" indexing sink enabled, operators +must ensure the following: + +1. The "psql" indexing sink is provided in Tendermint's configuration. +2. A 'tx-index.psql-conn' value is provided that contains the PostgreSQL connection URI. +3. The block and transaction event schemas have been created in the PostgreSQL database. + +Tendermint provides the block and transaction event schemas in the following +path: internal/state/indexer/sink/psql/schema.sql + +To create the schema in a PostgreSQL database, perform the schema query +manually or invoke schema creation via the CLI: + + $ psql -f internal/state/indexer/sink/psql/schema.sql + +The "psql" indexing sink prohibits queries via RPC. When using a PostgreSQL sink, +queries can and should be made directly against the database using SQL. + +The following are some example SQL queries against the database schema: + +* Query for all transaction events for a given transaction hash: + + SELECT * FROM tx_events WHERE hash = '3E7D1F...'; + +* Query for all transaction events for a given block height: + + SELECT * FROM tx_events WHERE height = 25; + +* Query for transaction events that have a given composite key, regardless of +value (i.e. a value wildcard): + + SELECT * FROM tx_events WHERE key LIKE '%transfer.recipient%'; + +Note that if a complete abci.TxResult is needed, you will need to join "tx_events" with +"tx_results" via a foreign key to obtain the raw protobuf-encoded abci.TxResult. +*/ +package indexer diff --git a/state/indexer/eventsink.go b/internal/state/indexer/eventsink.go similarity index 96% rename from state/indexer/eventsink.go rename to internal/state/indexer/eventsink.go index d7c9d10e0..dba50b6af 100644 --- a/state/indexer/eventsink.go +++ b/internal/state/indexer/eventsink.go @@ -16,7 +16,7 @@ const ( PSQL EventSinkType = "psql" ) -//go:generate ../../scripts/mockery_generate.sh EventSink +//go:generate ../../../scripts/mockery_generate.sh EventSink // EventSink interface is defined the APIs for the IndexerService to interact with the data store, // including the block/transaction indexing and the search functions.
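Where the documentation above mentions recovering a full abci.TxResult, a hedged Go sketch of the lookup follows. The tx_results table and its tx_hash/tx_result columns are taken from the psql sink implementation later in this diff, so treat the exact names as assumptions against schema.sql; the usual imports (database/sql, github.com/gogo/protobuf/proto, abci/types) are assumed:

    // lookupTxResult fetches and decodes the protobuf-encoded tx_result row
    // for a transaction hash. db is an open *sql.DB on the sink's database.
    func lookupTxResult(db *sql.DB, txHash string) (*abci.TxResult, error) {
        var raw []byte
        if err := db.QueryRow(
            `SELECT tx_result FROM tx_results WHERE tx_hash = $1;`, txHash,
        ).Scan(&raw); err != nil {
            return nil, err
        }
        txr := new(abci.TxResult)
        if err := proto.Unmarshal(raw, txr); err != nil {
            return nil, err
        }
        return txr, nil
    }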
diff --git a/state/indexer/indexer.go b/internal/state/indexer/indexer.go similarity index 100% rename from state/indexer/indexer.go rename to internal/state/indexer/indexer.go diff --git a/state/indexer/indexer_service.go b/internal/state/indexer/indexer_service.go similarity index 66% rename from state/indexer/indexer_service.go rename to internal/state/indexer/indexer_service.go index a429b66a0..39a1847f8 100644 --- a/state/indexer/indexer_service.go +++ b/internal/state/indexer/indexer_service.go @@ -51,43 +51,47 @@ func (is *Service) OnStart() error { go func() { for { - msg := <-blockHeadersSub.Out() + select { + case <-blockHeadersSub.Canceled(): + return + case msg := <-blockHeadersSub.Out(): - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) - height := eventDataHeader.Header.Height - batch := NewBatch(eventDataHeader.NumTxs) + eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) + height := eventDataHeader.Header.Height + batch := NewBatch(eventDataHeader.NumTxs) - for i := int64(0); i < eventDataHeader.NumTxs; i++ { - msg2 := <-txsSub.Out() - txResult := msg2.Data().(types.EventDataTx).TxResult + for i := int64(0); i < eventDataHeader.NumTxs; i++ { + msg2 := <-txsSub.Out() + txResult := msg2.Data().(types.EventDataTx).TxResult - if err = batch.Add(&txResult); err != nil { - is.Logger.Error( - "failed to add tx to batch", - "height", height, - "index", txResult.Index, - "err", err, - ) - } - } - - if !IndexingEnabled(is.eventSinks) { - continue - } - - for _, sink := range is.eventSinks { - if err := sink.IndexBlockEvents(eventDataHeader); err != nil { - is.Logger.Error("failed to index block", "height", height, "err", err) - } else { - is.Logger.Debug("indexed block", "height", height, "sink", sink.Type()) + if err = batch.Add(&txResult); err != nil { + is.Logger.Error( + "failed to add tx to batch", + "height", height, + "index", txResult.Index, + "err", err, + ) + } } - if len(batch.Ops) > 0 { - err := sink.IndexTxEvents(batch.Ops) - if err != nil { - is.Logger.Error("failed to index block txs", "height", height, "err", err) + if !IndexingEnabled(is.eventSinks) { + continue + } + + for _, sink := range is.eventSinks { + if err := sink.IndexBlockEvents(eventDataHeader); err != nil { + is.Logger.Error("failed to index block", "height", height, "err", err) } else { - is.Logger.Debug("indexed txs", "height", height, "sink", sink.Type()) + is.Logger.Debug("indexed block", "height", height, "sink", sink.Type()) + } + + if len(batch.Ops) > 0 { + err := sink.IndexTxEvents(batch.Ops) + if err != nil { + is.Logger.Error("failed to index block txs", "height", height, "err", err) + } else { + is.Logger.Debug("indexed txs", "height", height, "sink", sink.Type()) + } } } } diff --git a/state/indexer/indexer_service_test.go b/internal/state/indexer/indexer_service_test.go similarity index 90% rename from state/indexer/indexer_service_test.go rename to internal/state/indexer/indexer_service_test.go index 68a00afb5..d9f29b677 100644 --- a/state/indexer/indexer_service_test.go +++ b/internal/state/indexer/indexer_service_test.go @@ -9,19 +9,21 @@ import ( "time" "github.com/adlio/schema" - _ "github.com/lib/pq" dockertest "github.com/ory/dockertest" "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + indexer "github.com/tendermint/tendermint/internal/state/indexer" + kv 
"github.com/tendermint/tendermint/internal/state/indexer/sink/kv" + psql "github.com/tendermint/tendermint/internal/state/indexer/sink/psql" tmlog "github.com/tendermint/tendermint/libs/log" - indexer "github.com/tendermint/tendermint/state/indexer" - kv "github.com/tendermint/tendermint/state/indexer/sink/kv" - psql "github.com/tendermint/tendermint/state/indexer/sink/psql" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" + + // Register the Postgre database driver. + _ "github.com/lib/pq" ) var psqldb *sql.DB @@ -55,7 +57,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { pool, err := setupDB(t) assert.Nil(t, err) - store := db.NewMemDB() + store := dbm.NewMemDB() eventSinks := []indexer.EventSink{kv.NewEventSink(store), pSink} assert.True(t, indexer.KVSinkEnabled(eventSinks)) assert.True(t, indexer.IndexingEnabled(eventSinks)) @@ -139,7 +141,7 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { assert.Nil(t, err) resource, err = pool.RunWithOptions(&dockertest.RunOptions{ - Repository: psql.DriverName, + Repository: "postgres", Tag: "13", Env: []string{ "POSTGRES_USER=" + user, @@ -164,19 +166,16 @@ func setupDB(t *testing.T) (*dockertest.Pool, error) { conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName) - if err = pool.Retry(func() error { - var err error - - pSink, psqldb, err = psql.NewEventSink(conn, "test-chainID") - + assert.NoError(t, pool.Retry(func() error { + sink, err := psql.NewEventSink(conn, "test-chainID") if err != nil { return err } + pSink = sink + psqldb = sink.DB() return psqldb.Ping() - }); err != nil { - assert.Error(t, err) - } + })) resetDB(t) diff --git a/state/indexer/mocks/event_sink.go b/internal/state/indexer/mocks/event_sink.go similarity index 98% rename from state/indexer/mocks/event_sink.go rename to internal/state/indexer/mocks/event_sink.go index ce5b8ace5..98b32e935 100644 --- a/state/indexer/mocks/event_sink.go +++ b/internal/state/indexer/mocks/event_sink.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - indexer "github.com/tendermint/tendermint/state/indexer" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/state/indexer/query_range.go b/internal/state/indexer/query_range.go similarity index 100% rename from state/indexer/query_range.go rename to internal/state/indexer/query_range.go diff --git a/state/indexer/sink/kv/kv.go b/internal/state/indexer/sink/kv/kv.go similarity index 78% rename from state/indexer/sink/kv/kv.go rename to internal/state/indexer/sink/kv/kv.go index 7d51640d8..fe7068a1b 100644 --- a/state/indexer/sink/kv/kv.go +++ b/internal/state/indexer/sink/kv/kv.go @@ -3,13 +3,14 @@ package kv import ( "context" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" - kvb "github.com/tendermint/tendermint/state/indexer/block/kv" - kvt "github.com/tendermint/tendermint/state/indexer/tx/kv" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" + kvb "github.com/tendermint/tendermint/internal/state/indexer/block/kv" + kvt "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/types" ) var _ indexer.EventSink = 
(*EventSink)(nil) @@ -17,14 +18,16 @@ var _ indexer.EventSink = (*EventSink)(nil) // The EventSink is an aggregator for redirecting the call path of the tx/block kvIndexer. // For the implementation details please see the kv.go in the indexer/block and indexer/tx folder. type EventSink struct { - txi *kvt.TxIndex - bi *kvb.BlockerIndexer + txi *kvt.TxIndex + bi *kvb.BlockerIndexer + store dbm.DB } func NewEventSink(store dbm.DB) indexer.EventSink { return &EventSink{ - txi: kvt.NewTxIndex(store), - bi: kvb.New(store), + txi: kvt.NewTxIndex(store), + bi: kvb.New(store), + store: store, } } @@ -57,5 +60,5 @@ func (kves *EventSink) HasBlock(h int64) (bool, error) { } func (kves *EventSink) Stop() error { - return nil + return kves.store.Close() } diff --git a/state/indexer/sink/kv/kv_test.go b/internal/state/indexer/sink/kv/kv_test.go similarity index 95% rename from state/indexer/sink/kv/kv_test.go rename to internal/state/indexer/sink/kv/kv_test.go index a5d2dd81e..7d7552946 100644 --- a/state/indexer/sink/kv/kv_test.go +++ b/internal/state/indexer/sink/kv/kv_test.go @@ -5,29 +5,30 @@ import ( "fmt" "testing" + dbm "github.com/tendermint/tm-db" + "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" + kvtx "github.com/tendermint/tendermint/internal/state/indexer/tx/kv" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" - kvtx "github.com/tendermint/tendermint/state/indexer/tx/kv" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) func TestType(t *testing.T) { - kvSink := NewEventSink(db.NewMemDB()) + kvSink := NewEventSink(dbm.NewMemDB()) assert.Equal(t, indexer.KV, kvSink.Type()) } func TestStop(t *testing.T) { - kvSink := NewEventSink(db.NewMemDB()) + kvSink := NewEventSink(dbm.NewMemDB()) assert.Nil(t, kvSink.Stop()) } func TestBlockFuncs(t *testing.T) { - store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) + store := dbm.NewPrefixDB(dbm.NewMemDB(), []byte("block_events")) indexer := NewEventSink(store) require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ @@ -158,7 +159,7 @@ func TestBlockFuncs(t *testing.T) { } func TestTxSearchWithCancelation(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -180,7 +181,7 @@ func TestTxSearchWithCancelation(t *testing.T) { } func TestTxSearchDeprecatedIndexing(t *testing.T) { - esdb := db.NewMemDB() + esdb := dbm.NewMemDB() indexer := NewEventSink(esdb) // index tx using events indexing (composite key) @@ -260,7 +261,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { } func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -282,7 +283,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { } func TestTxSearchMultipleTxs(t *testing.T) { - indexer := NewEventSink(db.NewMemDB()) + indexer := NewEventSink(dbm.NewMemDB()) // indexed first, but bigger height (to test the order of transactions) txResult := 
txResultWithEvents([]abci.Event{ diff --git a/state/indexer/sink/null/null.go b/internal/state/indexer/sink/null/null.go similarity index 94% rename from state/indexer/sink/null/null.go rename to internal/state/indexer/sink/null/null.go index b5ad93ab4..f58142f21 100644 --- a/state/indexer/sink/null/null.go +++ b/internal/state/indexer/sink/null/null.go @@ -4,8 +4,8 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/sink/null/null_test.go b/internal/state/indexer/sink/null/null_test.go similarity index 93% rename from state/indexer/sink/null/null_test.go rename to internal/state/indexer/sink/null/null_test.go index eef63fd6e..15b77dc55 100644 --- a/state/indexer/sink/null/null_test.go +++ b/internal/state/indexer/sink/null/null_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/state/indexer" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/indexer/sink/psql/psql.go b/internal/state/indexer/sink/psql/psql.go new file mode 100644 index 000000000..4db6f4435 --- /dev/null +++ b/internal/state/indexer/sink/psql/psql.go @@ -0,0 +1,257 @@ +// Package psql implements an event sink backed by a PostgreSQL database. +package psql + +import ( + "context" + "database/sql" + "errors" + "fmt" + "strings" + "time" + + "github.com/gogo/protobuf/proto" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/types" +) + +const ( + tableBlocks = "blocks" + tableTxResults = "tx_results" + tableEvents = "events" + tableAttributes = "attributes" + driverName = "postgres" +) + +// EventSink is an indexer backend providing the tx/block index services. This +// implementation stores records in a PostgreSQL database using the schema +// defined in state/indexer/sink/psql/schema.sql. +type EventSink struct { + store *sql.DB + chainID string +} + +// NewEventSink constructs an event sink associated with the PostgreSQL +// database specified by connStr. Events written to the sink are attributed to +// the specified chainID. +func NewEventSink(connStr, chainID string) (*EventSink, error) { + db, err := sql.Open(driverName, connStr) + if err != nil { + return nil, err + } + + return &EventSink{ + store: db, + chainID: chainID, + }, nil +} + +// DB returns the underlying Postgres connection used by the sink. +// This is exported to support testing. +func (es *EventSink) DB() *sql.DB { return es.store } + +// Type returns the structure type for this sink, which is Postgres. +func (es *EventSink) Type() indexer.EventSinkType { return indexer.PSQL } + +// runInTransaction executes query in a fresh database transaction. +// If query reports an error, the transaction is rolled back and the +// error from query is reported to the caller. +// Otherwise, the result of committing the transaction is returned. 
+func runInTransaction(db *sql.DB, query func(*sql.Tx) error) error { + dbtx, err := db.Begin() + if err != nil { + return err + } + if err := query(dbtx); err != nil { + _ = dbtx.Rollback() // report the initial error, not the rollback + return err + } + return dbtx.Commit() +} + +// queryWithID executes the specified SQL query with the given arguments, +// expecting a single-row, single-column result containing an ID. If the query +// succeeds, the ID from the result is returned. +func queryWithID(tx *sql.Tx, query string, args ...interface{}) (uint32, error) { + var id uint32 + if err := tx.QueryRow(query, args...).Scan(&id); err != nil { + return 0, err + } + return id, nil +} + +// insertEvents inserts a slice of events and any indexed attributes of those +// events into the database associated with dbtx. +// +// If txID > 0, the event is attributed to the Tendermint transaction with that +// ID; otherwise it is recorded as a block event. +func insertEvents(dbtx *sql.Tx, blockID, txID uint32, evts []abci.Event) error { + // Populate the transaction ID field iff one is defined (> 0). + var txIDArg interface{} + if txID > 0 { + txIDArg = txID + } + + // Add each event to the events table, and retrieve its row ID to use when + // adding any attributes the event provides. + for _, evt := range evts { + // Skip events with an empty type. + if evt.Type == "" { + continue + } + + eid, err := queryWithID(dbtx, ` +INSERT INTO `+tableEvents+` (block_id, tx_id, type) VALUES ($1, $2, $3) + RETURNING rowid; +`, blockID, txIDArg, evt.Type) + if err != nil { + return err + } + + // Add any attributes flagged for indexing. + for _, attr := range evt.Attributes { + if !attr.Index { + continue + } + compositeKey := evt.Type + "." + attr.Key + if _, err := dbtx.Exec(` +INSERT INTO `+tableAttributes+` (event_id, key, composite_key, value) + VALUES ($1, $2, $3, $4); +`, eid, attr.Key, compositeKey, attr.Value); err != nil { + return err + } + } + } + return nil +} + +// makeIndexedEvent constructs an event from the specified composite key and +// value. If the key has the form "type.name", the event will have a single +// attribute with that name and the value; otherwise the event will have only +// a type and no attributes. +func makeIndexedEvent(compositeKey, value string) abci.Event { + i := strings.Index(compositeKey, ".") + if i < 0 { + return abci.Event{Type: compositeKey} + } + return abci.Event{Type: compositeKey[:i], Attributes: []abci.EventAttribute{ + {Key: compositeKey[i+1:], Value: value, Index: true}, + }} +} + +// IndexBlockEvents indexes the specified block header, part of the +// indexer.EventSink interface. +func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error { + ts := time.Now().UTC() + + return runInTransaction(es.store, func(dbtx *sql.Tx) error { + // Add the block to the blocks table and report back its row ID for use + // in indexing the events for the block. + blockID, err := queryWithID(dbtx, ` +INSERT INTO `+tableBlocks+` (height, chain_id, created_at) + VALUES ($1, $2, $3) + ON CONFLICT DO NOTHING + RETURNING rowid; +`, h.Header.Height, es.chainID, ts) + if err == sql.ErrNoRows { + return nil // we already saw this block; quietly succeed + } else if err != nil { + return fmt.Errorf("indexing block header: %w", err) + } + + // Insert the special block meta-event for height. 
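+        // For example, assuming types.BlockHeightKey is the composite key
+        // "block.height", a block at height 42 is indexed as an event of type
+        // "block" with a single attribute height="42" (see makeIndexedEvent).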
+        if err := insertEvents(dbtx, blockID, 0, []abci.Event{
+            makeIndexedEvent(types.BlockHeightKey, fmt.Sprint(h.Header.Height)),
+        }); err != nil {
+            return fmt.Errorf("block meta-events: %w", err)
+        }
+        // Insert all the block events. Order is important here: begin-block
+        // events are recorded before end-block events.
+        if err := insertEvents(dbtx, blockID, 0, h.ResultBeginBlock.Events); err != nil {
+            return fmt.Errorf("begin-block events: %w", err)
+        }
+        if err := insertEvents(dbtx, blockID, 0, h.ResultEndBlock.Events); err != nil {
+            return fmt.Errorf("end-block events: %w", err)
+        }
+        return nil
+    })
+}
+
+// IndexTxEvents indexes the specified transaction results, part of the
+// indexer.EventSink interface.
+func (es *EventSink) IndexTxEvents(txrs []*abci.TxResult) error {
+    ts := time.Now().UTC()
+
+    for _, txr := range txrs {
+        // Encode the result message in protobuf wire format for indexing.
+        resultData, err := proto.Marshal(txr)
+        if err != nil {
+            return fmt.Errorf("marshaling tx_result: %w", err)
+        }
+
+        // Index the hash of the underlying transaction as a hex string.
+        txHash := fmt.Sprintf("%X", types.Tx(txr.Tx).Hash())
+
+        if err := runInTransaction(es.store, func(dbtx *sql.Tx) error {
+            // Find the block associated with this transaction. The block header
+            // must have been indexed prior to the transactions belonging to it.
+            blockID, err := queryWithID(dbtx, `
+SELECT rowid FROM `+tableBlocks+` WHERE height = $1 AND chain_id = $2;
+`, txr.Height, es.chainID)
+            if err != nil {
+                return fmt.Errorf("finding block ID: %w", err)
+            }
+
+            // Insert a record for this tx_result and capture its ID for indexing events.
+            txID, err := queryWithID(dbtx, `
+INSERT INTO `+tableTxResults+` (block_id, index, created_at, tx_hash, tx_result)
+  VALUES ($1, $2, $3, $4, $5)
+  ON CONFLICT DO NOTHING
+  RETURNING rowid;
+`, blockID, txr.Index, ts, txHash, resultData)
+            if err == sql.ErrNoRows {
+                return nil // we already saw this transaction; quietly succeed
+            } else if err != nil {
+                return fmt.Errorf("indexing tx_result: %w", err)
+            }
+
+            // Insert the special transaction meta-events for hash and height.
+            if err := insertEvents(dbtx, blockID, txID, []abci.Event{
+                makeIndexedEvent(types.TxHashKey, txHash),
+                makeIndexedEvent(types.TxHeightKey, fmt.Sprint(txr.Height)),
+            }); err != nil {
+                return fmt.Errorf("indexing transaction meta-events: %w", err)
+            }
+            // Index any events packaged with the transaction.
+            if err := insertEvents(dbtx, blockID, txID, txr.Result.Events); err != nil {
+                return fmt.Errorf("indexing transaction events: %w", err)
+            }
+            return nil
+        }); err != nil {
+            return err
+        }
+    }
+    return nil
+}
+
+// SearchBlockEvents is not implemented by this sink, and reports an error for all queries.
+func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) {
+    return nil, errors.New("block search is not supported via the postgres event sink")
+}
+
+// SearchTxEvents is not implemented by this sink, and reports an error for all queries.
+func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) {
+    return nil, errors.New("tx search is not supported via the postgres event sink")
+}
+
+// GetTxByHash is not implemented by this sink, and reports an error for all queries.
+func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) {
+    return nil, errors.New("getTxByHash is not supported via the postgres event sink")
+}
+
+// HasBlock is not implemented by this sink, and reports an error for all queries.
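+// Deployments that need block presence checks or event searches can configure
+// the kv sink alongside this one (see EventSinksFromConfig in
+// internal/state/indexer/sink).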
+func (es *EventSink) HasBlock(h int64) (bool, error) { + return false, errors.New("hasBlock is not supported via the postgres event sink") +} + +// Stop closes the underlying PostgreSQL database. +func (es *EventSink) Stop() error { return es.store.Close() } diff --git a/internal/state/indexer/sink/psql/psql_test.go b/internal/state/indexer/sink/psql/psql_test.go new file mode 100644 index 000000000..f19bbfba7 --- /dev/null +++ b/internal/state/indexer/sink/psql/psql_test.go @@ -0,0 +1,344 @@ +package psql + +import ( + "context" + "database/sql" + "flag" + "fmt" + "io/ioutil" + "log" + "os" + "os/signal" + "testing" + "time" + + "github.com/adlio/schema" + "github.com/gogo/protobuf/proto" + "github.com/ory/dockertest" + "github.com/ory/dockertest/docker" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/types" + + // Register the Postgres database driver. + _ "github.com/lib/pq" +) + +// Verify that the type satisfies the EventSink interface. +var _ indexer.EventSink = (*EventSink)(nil) + +var ( + doPauseAtExit = flag.Bool("pause-at-exit", false, + "If true, pause the test until interrupted at shutdown, to allow debugging") + + // A hook that test cases can call to obtain the shared database instance + // used for testing the sink. This is initialized in TestMain (see below). + testDB func() *sql.DB +) + +const ( + user = "postgres" + password = "secret" + port = "5432" + dsn = "postgres://%s:%s@localhost:%s/%s?sslmode=disable" + dbName = "postgres" + chainID = "test-chainID" + + viewBlockEvents = "block_events" + viewTxEvents = "tx_events" +) + +func TestMain(m *testing.M) { + flag.Parse() + + // Set up docker and start a container running PostgreSQL. + pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL")) + if err != nil { + log.Fatalf("Creating docker pool: %v", err) + } + + resource, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "postgres", + Tag: "13", + Env: []string{ + "POSTGRES_USER=" + user, + "POSTGRES_PASSWORD=" + password, + "POSTGRES_DB=" + dbName, + "listen_addresses = '*'", + }, + ExposedPorts: []string{port}, + }, func(config *docker.HostConfig) { + // set AutoRemove to true so that stopped container goes away by itself + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{ + Name: "no", + } + }) + if err != nil { + log.Fatalf("Starting docker pool: %v", err) + } + + if *doPauseAtExit { + log.Print("Pause at exit is enabled, containers will not expire") + } else { + const expireSeconds = 60 + _ = resource.Expire(expireSeconds) + log.Printf("Container expiration set to %d seconds", expireSeconds) + } + + // Connect to the database, clear any leftover data, and install the + // indexing schema. 
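+    // With the constants defined above, the connection string takes the form
+    // postgres://postgres:secret@localhost:<mapped port>/postgres?sslmode=disable.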
+ conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName) + var db *sql.DB + + if err := pool.Retry(func() error { + sink, err := NewEventSink(conn, chainID) + if err != nil { + return err + } + db = sink.DB() // set global for test use + return db.Ping() + }); err != nil { + log.Fatalf("Connecting to database: %v", err) + } + + if err := resetDatabase(db); err != nil { + log.Fatalf("Flushing database: %v", err) + } + + sm, err := readSchema() + if err != nil { + log.Fatalf("Reading schema: %v", err) + } else if err := schema.NewMigrator().Apply(db, sm); err != nil { + log.Fatalf("Applying schema: %v", err) + } + + // Set up the hook for tests to get the shared database handle. + testDB = func() *sql.DB { return db } + + // Run the selected test cases. + code := m.Run() + + // Clean up and shut down the database container. + if *doPauseAtExit { + log.Print("Testing complete, pausing for inspection. Send SIGINT to resume teardown") + waitForInterrupt() + log.Print("(resuming)") + } + log.Print("Shutting down database") + if err := pool.Purge(resource); err != nil { + log.Printf("WARNING: Purging pool failed: %v", err) + } + if err := db.Close(); err != nil { + log.Printf("WARNING: Closing database failed: %v", err) + } + + os.Exit(code) +} + +func TestType(t *testing.T) { + psqlSink := &EventSink{store: testDB(), chainID: chainID} + assert.Equal(t, indexer.PSQL, psqlSink.Type()) +} + +func TestIndexing(t *testing.T) { + t.Run("IndexBlockEvents", func(t *testing.T) { + indexer := &EventSink{store: testDB(), chainID: chainID} + require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader())) + + verifyBlock(t, 1) + verifyBlock(t, 2) + + verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(1) }) + verifyNotImplemented(t, "hasBlock", func() (bool, error) { return indexer.HasBlock(2) }) + + verifyNotImplemented(t, "block search", func() (bool, error) { + v, err := indexer.SearchBlockEvents(context.Background(), nil) + return v != nil, err + }) + + require.NoError(t, verifyTimeStamp(tableBlocks)) + + // Attempting to reindex the same events should gracefully succeed. + require.NoError(t, indexer.IndexBlockEvents(newTestBlockHeader())) + }) + + t.Run("IndexTxEvents", func(t *testing.T) { + indexer := &EventSink{store: testDB(), chainID: chainID} + + txResult := txResultWithEvents([]abci.Event{ + makeIndexedEvent("account.number", "1"), + makeIndexedEvent("account.owner", "Ivan"), + makeIndexedEvent("account.owner", "Yulieta"), + + {Type: "", Attributes: []abci.EventAttribute{{Key: "not_allowed", Value: "Vlad", Index: true}}}, + }) + require.NoError(t, indexer.IndexTxEvents([]*abci.TxResult{txResult})) + + txr, err := loadTxResult(types.Tx(txResult.Tx).Hash()) + require.NoError(t, err) + assert.Equal(t, txResult, txr) + + require.NoError(t, verifyTimeStamp(tableTxResults)) + require.NoError(t, verifyTimeStamp(viewTxEvents)) + + verifyNotImplemented(t, "getTxByHash", func() (bool, error) { + txr, err := indexer.GetTxByHash(types.Tx(txResult.Tx).Hash()) + return txr != nil, err + }) + verifyNotImplemented(t, "tx search", func() (bool, error) { + txr, err := indexer.SearchTxEvents(context.Background(), nil) + return txr != nil, err + }) + + // try to insert the duplicate tx events. 
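+        // This should succeed quietly: IndexTxEvents inserts with ON CONFLICT
+        // DO NOTHING and treats sql.ErrNoRows from the RETURNING clause as
+        // "already indexed" (see psql.go).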
+        err = indexer.IndexTxEvents([]*abci.TxResult{txResult})
+        require.NoError(t, err)
+    })
+}
+
+func TestStop(t *testing.T) {
+    indexer := &EventSink{store: testDB()}
+    require.NoError(t, indexer.Stop())
+}
+
+// newTestBlockHeader constructs a fresh copy of a block header containing
+// known test values to exercise the indexer.
+func newTestBlockHeader() types.EventDataNewBlockHeader {
+    return types.EventDataNewBlockHeader{
+        Header: types.Header{Height: 1},
+        ResultBeginBlock: abci.ResponseBeginBlock{
+            Events: []abci.Event{
+                makeIndexedEvent("begin_event.proposer", "FCAA001"),
+                makeIndexedEvent("thingy.whatzit", "O.O"),
+            },
+        },
+        ResultEndBlock: abci.ResponseEndBlock{
+            Events: []abci.Event{
+                makeIndexedEvent("end_event.foo", "100"),
+                makeIndexedEvent("thingy.whatzit", "-.O"),
+            },
+        },
+    }
+}
+
+// readSchema loads the indexing database schema file.
+func readSchema() ([]*schema.Migration, error) {
+    const filename = "schema.sql"
+    contents, err := ioutil.ReadFile(filename)
+    if err != nil {
+        return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err)
+    }
+
+    return []*schema.Migration{{
+        ID:     time.Now().Local().String() + " db schema",
+        Script: string(contents),
+    }}, nil
+}
+
+// resetDatabase drops all the data from the test database.
+func resetDatabase(db *sql.DB) error {
+    _, err := db.Exec(`DROP TABLE IF EXISTS blocks,tx_results,events,attributes CASCADE;`)
+    if err != nil {
+        return fmt.Errorf("dropping tables: %v", err)
+    }
+    _, err = db.Exec(`DROP VIEW IF EXISTS event_attributes,block_events,tx_events CASCADE;`)
+    if err != nil {
+        return fmt.Errorf("dropping views: %v", err)
+    }
+    return nil
+}
+
+// txResultWithEvents constructs a fresh transaction result with fixed values
+// for testing, including the specified events.
+func txResultWithEvents(events []abci.Event) *abci.TxResult {
+    return &abci.TxResult{
+        Height: 1,
+        Index:  0,
+        Tx:     types.Tx("HELLO WORLD"),
+        Result: abci.ResponseDeliverTx{
+            Data:   []byte{0},
+            Code:   abci.CodeTypeOK,
+            Log:    "",
+            Events: events,
+        },
+    }
+}
+
+// loadTxResult fetches and decodes the stored result for the transaction with
+// the given hash from the test database.
+func loadTxResult(hash []byte) (*abci.TxResult, error) {
+    hashString := fmt.Sprintf("%X", hash)
+    var resultData []byte
+    if err := testDB().QueryRow(`
+SELECT tx_result FROM `+tableTxResults+` WHERE tx_hash = $1;
+`, hashString).Scan(&resultData); err != nil {
+        return nil, fmt.Errorf("lookup transaction for hash %q failed: %v", hashString, err)
+    }
+
+    txr := new(abci.TxResult)
+    if err := proto.Unmarshal(resultData, txr); err != nil {
+        return nil, fmt.Errorf("unmarshaling txr: %v", err)
+    }
+
+    return txr, nil
+}
+
+// verifyTimeStamp checks that the named table has at least one row with a
+// recent created_at timestamp.
+func verifyTimeStamp(tableName string) error {
+    return testDB().QueryRow(fmt.Sprintf(`
+SELECT DISTINCT %[1]s.created_at
+  FROM %[1]s
+  WHERE %[1]s.created_at >= $1;
+`, tableName), time.Now().Add(-2*time.Second)).Err()
+}
+
+func verifyBlock(t *testing.T, height int64) {
+    // Check that the blocks table contains an entry for this height.
+    if err := testDB().QueryRow(`
+SELECT height FROM `+tableBlocks+` WHERE height = $1;
+`, height).Err(); err == sql.ErrNoRows {
+        t.Errorf("No block found for height=%d", height)
+    } else if err != nil {
+        t.Fatalf("Database query failed: %v", err)
+    }
+
+    // Verify the presence of begin_block and end_block events.
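+    // Both lookups go through the block_events view, which joins blocks to
+    // event_attributes and keeps only rows whose tx_id is NULL (see schema.sql).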
+    if err := testDB().QueryRow(`
+SELECT type, height, chain_id FROM `+viewBlockEvents+`
+  WHERE height = $1 AND type = $2 AND chain_id = $3;
+`, height, types.EventTypeBeginBlock, chainID).Err(); err == sql.ErrNoRows {
+        t.Errorf("No %q event found for height=%d", types.EventTypeBeginBlock, height)
+    } else if err != nil {
+        t.Fatalf("Database query failed: %v", err)
+    }
+
+    if err := testDB().QueryRow(`
+SELECT type, height, chain_id FROM `+viewBlockEvents+`
+  WHERE height = $1 AND type = $2 AND chain_id = $3;
+`, height, types.EventTypeEndBlock, chainID).Err(); err == sql.ErrNoRows {
+        t.Errorf("No %q event found for height=%d", types.EventTypeEndBlock, height)
+    } else if err != nil {
+        t.Fatalf("Database query failed: %v", err)
+    }
+}
+
+// verifyNotImplemented calls f and verifies that it returns both a
+// false-valued flag and a non-nil error whose string matches the expected
+// "not supported" message with the label prefixed.
+func verifyNotImplemented(t *testing.T, label string, f func() (bool, error)) {
+    t.Helper()
+    t.Logf("Verifying that %q reports it is not implemented", label)
+
+    want := label + " is not supported via the postgres event sink"
+    ok, err := f()
+    assert.False(t, ok)
+    require.NotNil(t, err)
+    assert.Equal(t, want, err.Error())
+}
+
+// waitForInterrupt blocks until a SIGINT is received by the process.
+func waitForInterrupt() {
+    ch := make(chan os.Signal, 1)
+    signal.Notify(ch, os.Interrupt)
+    <-ch
+}
diff --git a/internal/state/indexer/sink/psql/schema.sql b/internal/state/indexer/sink/psql/schema.sql
new file mode 100644
index 000000000..1091cd4c3
--- /dev/null
+++ b/internal/state/indexer/sink/psql/schema.sql
@@ -0,0 +1,85 @@
+/*
+  This file defines the database schema for the PostgreSQL ("psql") event sink
+  implementation in Tendermint. The operator must create a database and install
+  this schema before using the database to index events.
+ */
+
+-- The blocks table records metadata about each block.
+-- The block record does not include its events or transactions (see tx_results).
+CREATE TABLE blocks (
+  rowid      BIGSERIAL PRIMARY KEY,
+
+  height     BIGINT NOT NULL,
+  chain_id   VARCHAR NOT NULL,
+
+  -- When this block header was logged into the sink, in UTC.
+  created_at TIMESTAMPTZ NOT NULL,
+
+  UNIQUE (height, chain_id)
+);
+
+-- Index blocks by height and chain, since we need to resolve block IDs when
+-- indexing transaction records and transaction events.
+CREATE INDEX idx_blocks_height_chain ON blocks(height, chain_id);
+
+-- The tx_results table records metadata about transaction results. Note that
+-- the events from a transaction are stored separately.
+CREATE TABLE tx_results (
+  rowid BIGSERIAL PRIMARY KEY,
+
+  -- The block to which this transaction belongs.
+  block_id BIGINT NOT NULL REFERENCES blocks(rowid),
+  -- The sequential index of the transaction within the block.
+  index INTEGER NOT NULL,
+  -- When this result record was logged into the sink, in UTC.
+  created_at TIMESTAMPTZ NOT NULL,
+  -- The hex-encoded hash of the transaction.
+  tx_hash VARCHAR NOT NULL,
+  -- The protobuf wire encoding of the TxResult message.
+  tx_result BYTEA NOT NULL,
+
+  UNIQUE (block_id, index)
+);
+
+-- The events table records events. All events (both block and transaction) are
+-- associated with a block ID; transaction events also have a transaction ID.
+CREATE TABLE events (
+  rowid BIGSERIAL PRIMARY KEY,
+
+  -- The block and transaction this event belongs to.
+  -- If tx_id is NULL, this is a block event.
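+  -- For example, a begin_block event row has a NULL tx_id, while an event
+  -- emitted by a transaction references both its block and its tx_results row.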
+  block_id BIGINT NOT NULL REFERENCES blocks(rowid),
+  tx_id BIGINT NULL REFERENCES tx_results(rowid),
+
+  -- The application-defined type label for the event.
+  type VARCHAR NOT NULL
+);
+
+-- The attributes table records event attributes.
+CREATE TABLE attributes (
+  event_id      BIGINT NOT NULL REFERENCES events(rowid),
+  key           VARCHAR NOT NULL, -- bare key
+  composite_key VARCHAR NOT NULL, -- composed type.key
+  value         VARCHAR NULL,
+
+  UNIQUE (event_id, key)
+);
+
+-- A joined view of events and their attributes. Events that do not have any
+-- attributes are represented as a single row with empty key and value fields.
+CREATE VIEW event_attributes AS
+  SELECT block_id, tx_id, type, key, composite_key, value
+    FROM events LEFT JOIN attributes ON (events.rowid = attributes.event_id);
+
+-- A joined view of all block events (those having tx_id NULL).
+CREATE VIEW block_events AS
+  SELECT blocks.rowid as block_id, height, chain_id, type, key, composite_key, value
+    FROM blocks JOIN event_attributes ON (blocks.rowid = event_attributes.block_id)
+   WHERE event_attributes.tx_id IS NULL;
+
+-- A joined view of all transaction events.
+CREATE VIEW tx_events AS
+  SELECT height, index, chain_id, type, key, composite_key, value, tx_results.created_at
+    FROM blocks JOIN tx_results ON (blocks.rowid = tx_results.block_id)
+    JOIN event_attributes ON (tx_results.rowid = event_attributes.tx_id)
+   WHERE event_attributes.tx_id IS NOT NULL;
diff --git a/internal/state/indexer/sink/sink.go b/internal/state/indexer/sink/sink.go
new file mode 100644
index 000000000..b4c41ec31
--- /dev/null
+++ b/internal/state/indexer/sink/sink.go
@@ -0,0 +1,65 @@
+package sink
+
+import (
+    "errors"
+    "strings"
+
+    "github.com/tendermint/tendermint/config"
+    "github.com/tendermint/tendermint/internal/state/indexer"
+    "github.com/tendermint/tendermint/internal/state/indexer/sink/kv"
+    "github.com/tendermint/tendermint/internal/state/indexer/sink/null"
+    "github.com/tendermint/tendermint/internal/state/indexer/sink/psql"
+)
+
+// EventSinksFromConfig constructs a slice of indexer.EventSink using the provided
+// configuration.
+//
+//nolint:lll
+func EventSinksFromConfig(cfg *config.Config, dbProvider config.DBProvider, chainID string) ([]indexer.EventSink, error) {
+    if len(cfg.TxIndex.Indexer) == 0 {
+        return []indexer.EventSink{null.NewEventSink()}, nil
+    }
+
+    // check for duplicated sinks
+    sinks := map[string]struct{}{}
+    for _, s := range cfg.TxIndex.Indexer {
+        sl := strings.ToLower(s)
+        if _, ok := sinks[sl]; ok {
+            return nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
+        }
+        sinks[sl] = struct{}{}
+    }
+    eventSinks := []indexer.EventSink{}
+    for k := range sinks {
+        switch indexer.EventSinkType(k) {
+        case indexer.NULL:
+            // If null appears anywhere in the config, all other sinks are
+            // dropped and only the null sink is used.
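+            // For example, with something like `indexer = ["null", "kv"]` in
+            // the tx-index section of config.toml (key name illustrative),
+            // only the null sink is constructed.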
+ return []indexer.EventSink{null.NewEventSink()}, nil + + case indexer.KV: + store, err := dbProvider(&config.DBContext{ID: "tx_index", Config: cfg}) + if err != nil { + return nil, err + } + + eventSinks = append(eventSinks, kv.NewEventSink(store)) + + case indexer.PSQL: + conn := cfg.TxIndex.PsqlConn + if conn == "" { + return nil, errors.New("the psql connection settings cannot be empty") + } + + es, err := psql.NewEventSink(conn, chainID) + if err != nil { + return nil, err + } + eventSinks = append(eventSinks, es) + default: + return nil, errors.New("unsupported event sink type") + } + } + return eventSinks, nil + +} diff --git a/state/indexer/tx/kv/kv.go b/internal/state/indexer/tx/kv/kv.go similarity index 99% rename from state/indexer/tx/kv/kv.go rename to internal/state/indexer/tx/kv/kv.go index 080dbce2c..f0550f8f3 100644 --- a/state/indexer/tx/kv/kv.go +++ b/internal/state/indexer/tx/kv/kv.go @@ -12,8 +12,8 @@ import ( dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + indexer "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - indexer "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) diff --git a/state/indexer/tx/kv/kv_bench_test.go b/internal/state/indexer/tx/kv/kv_bench_test.go similarity index 100% rename from state/indexer/tx/kv/kv_bench_test.go rename to internal/state/indexer/tx/kv/kv_bench_test.go diff --git a/state/indexer/tx/kv/kv_test.go b/internal/state/indexer/tx/kv/kv_test.go similarity index 96% rename from state/indexer/tx/kv/kv_test.go rename to internal/state/indexer/tx/kv/kv_test.go index dd63dd9a4..c8ab2b0f2 100644 --- a/state/indexer/tx/kv/kv_test.go +++ b/internal/state/indexer/tx/kv/kv_test.go @@ -10,18 +10,17 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - db "github.com/tendermint/tm-db" + dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" + indexer "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" tmrand "github.com/tendermint/tendermint/libs/rand" - indexer "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/types" ) func TestTxIndex(t *testing.T) { - txIndexer := NewTxIndex(db.NewMemDB()) + txIndexer := NewTxIndex(dbm.NewMemDB()) tx := types.Tx("HELLO WORLD") txResult := &abci.TxResult{ @@ -67,7 +66,7 @@ func TestTxIndex(t *testing.T) { } func TestTxSearch(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -147,7 +146,7 @@ func TestTxSearch(t *testing.T) { } func TestTxSearchWithCancelation(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -165,7 +164,7 @@ func TestTxSearchWithCancelation(t *testing.T) { } func TestTxSearchDeprecatedIndexing(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) // index tx using events indexing (composite key) txResult1 := txResultWithEvents([]abci.Event{ @@ -244,7 +243,7 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { } func 
TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) txResult := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, @@ -266,7 +265,7 @@ func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { } func TestTxSearchMultipleTxs(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) + indexer := NewTxIndex(dbm.NewMemDB()) // indexed first, but bigger height (to test the order of transactions) txResult := txResultWithEvents([]abci.Event{ @@ -339,7 +338,7 @@ func benchmarkTxIndex(txsCount int64, b *testing.B) { require.NoError(b, err) defer os.RemoveAll(dir) - store, err := db.NewDB("tx_index", "goleveldb", dir) + store, err := dbm.NewDB("tx_index", "goleveldb", dir) require.NoError(b, err) txIndexer := NewTxIndex(store) diff --git a/state/indexer/tx/kv/utils.go b/internal/state/indexer/tx/kv/utils.go similarity index 100% rename from state/indexer/tx/kv/utils.go rename to internal/state/indexer/tx/kv/utils.go diff --git a/state/indexer/tx/kv/utils_test.go b/internal/state/indexer/tx/kv/utils_test.go similarity index 100% rename from state/indexer/tx/kv/utils_test.go rename to internal/state/indexer/tx/kv/utils_test.go diff --git a/state/indexer/tx/null/null.go b/internal/state/indexer/tx/null/null.go similarity index 93% rename from state/indexer/tx/null/null.go rename to internal/state/indexer/tx/null/null.go index d92ed489e..0da7fc683 100644 --- a/state/indexer/tx/null/null.go +++ b/internal/state/indexer/tx/null/null.go @@ -5,8 +5,8 @@ import ( "errors" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" ) var _ indexer.TxIndexer = (*TxIndex)(nil) diff --git a/state/metrics.go b/internal/state/metrics.go similarity index 100% rename from state/metrics.go rename to internal/state/metrics.go diff --git a/state/mocks/block_store.go b/internal/state/mocks/block_store.go similarity index 100% rename from state/mocks/block_store.go rename to internal/state/mocks/block_store.go diff --git a/state/mocks/event_sink.go b/internal/state/mocks/event_sink.go similarity index 98% rename from state/mocks/event_sink.go rename to internal/state/mocks/event_sink.go index 749515ccf..b8a8fc464 100644 --- a/state/mocks/event_sink.go +++ b/internal/state/mocks/event_sink.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - indexer "github.com/tendermint/tendermint/state/indexer" + indexer "github.com/tendermint/tendermint/internal/state/indexer" query "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/state/mocks/evidence_pool.go b/internal/state/mocks/evidence_pool.go similarity index 96% rename from state/mocks/evidence_pool.go rename to internal/state/mocks/evidence_pool.go index bb33547d2..8bf4a9b64 100644 --- a/state/mocks/evidence_pool.go +++ b/internal/state/mocks/evidence_pool.go @@ -4,8 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" - + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" ) diff --git a/state/mocks/store.go b/internal/state/mocks/store.go similarity index 98% rename from state/mocks/store.go rename to internal/state/mocks/store.go index 750bf7f29..4452f9bec 100644 
--- a/state/mocks/store.go +++ b/internal/state/mocks/store.go @@ -4,8 +4,7 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" - + state "github.com/tendermint/tendermint/internal/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" types "github.com/tendermint/tendermint/types" diff --git a/internal/state/rollback.go b/internal/state/rollback.go new file mode 100644 index 000000000..6e13da0e2 --- /dev/null +++ b/internal/state/rollback.go @@ -0,0 +1,87 @@ +package state + +import ( + "errors" + "fmt" + + "github.com/tendermint/tendermint/version" +) + +// Rollback overwrites the current Tendermint state (height n) with the most +// recent previous state (height n - 1). +// Note that this function does not affect application state. +func Rollback(bs BlockStore, ss Store) (int64, []byte, error) { + invalidState, err := ss.Load() + if err != nil { + return -1, nil, err + } + if invalidState.IsEmpty() { + return -1, nil, errors.New("no state found") + } + + rollbackHeight := invalidState.LastBlockHeight + rollbackBlock := bs.LoadBlockMeta(rollbackHeight) + if rollbackBlock == nil { + return -1, nil, fmt.Errorf("block at height %d not found", rollbackHeight) + } + + previousValidatorSet, err := ss.LoadValidators(rollbackHeight - 1) + if err != nil { + return -1, nil, err + } + + previousParams, err := ss.LoadConsensusParams(rollbackHeight) + if err != nil { + return -1, nil, err + } + + valChangeHeight := invalidState.LastHeightValidatorsChanged + // this can only happen if the validator set changed since the last block + if valChangeHeight > rollbackHeight { + valChangeHeight = rollbackHeight + } + + paramsChangeHeight := invalidState.LastHeightConsensusParamsChanged + // this can only happen if params changed from the last block + if paramsChangeHeight > rollbackHeight { + paramsChangeHeight = rollbackHeight + } + + // build the new state from the old state and the prior block + rolledBackState := State{ + Version: Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: previousParams.Version.AppVersion, + }, + Software: version.TMVersion, + }, + // immutable fields + ChainID: invalidState.ChainID, + InitialHeight: invalidState.InitialHeight, + + LastBlockHeight: invalidState.LastBlockHeight - 1, + LastBlockID: rollbackBlock.Header.LastBlockID, + LastBlockTime: rollbackBlock.Header.Time, + + NextValidators: invalidState.Validators, + Validators: invalidState.LastValidators, + LastValidators: previousValidatorSet, + LastHeightValidatorsChanged: valChangeHeight, + + ConsensusParams: previousParams, + LastHeightConsensusParamsChanged: paramsChangeHeight, + + LastResultsHash: rollbackBlock.Header.LastResultsHash, + AppHash: rollbackBlock.Header.AppHash, + } + + // persist the new state. This overrides the invalid one. 
NOTE: this will also + // persist the validator set and consensus params over the existing structures, + // but both should be the same + if err := ss.Save(rolledBackState); err != nil { + return -1, nil, fmt.Errorf("failed to save rolled back state: %w", err) + } + + return rolledBackState.LastBlockHeight, rolledBackState.AppHash, nil +} diff --git a/internal/state/rollback_test.go b/internal/state/rollback_test.go new file mode 100644 index 000000000..ae5c8ee84 --- /dev/null +++ b/internal/state/rollback_test.go @@ -0,0 +1,146 @@ +package state_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + "github.com/tendermint/tendermint/internal/test/factory" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +func TestRollback(t *testing.T) { + stateStore := state.NewStore(dbm.NewMemDB()) + blockStore := &mocks.BlockStore{} + var ( + height int64 = 100 + appVersion uint64 = 10 + ) + + valSet, _ := factory.RandValidatorSet(5, 10) + + params := types.DefaultConsensusParams() + params.Version.AppVersion = appVersion + newParams := types.DefaultConsensusParams() + newParams.Block.MaxBytes = 10000 + + initialState := state.State{ + Version: state.Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: 10, + }, + Software: version.TMVersion, + }, + ChainID: factory.DefaultTestChainID, + InitialHeight: 10, + LastBlockID: factory.MakeBlockID(), + AppHash: factory.RandomHash(), + LastResultsHash: factory.RandomHash(), + LastBlockHeight: height, + LastValidators: valSet, + Validators: valSet.CopyIncrementProposerPriority(1), + NextValidators: valSet.CopyIncrementProposerPriority(2), + LastHeightValidatorsChanged: height + 1, + ConsensusParams: *params, + LastHeightConsensusParamsChanged: height + 1, + } + require.NoError(t, stateStore.Bootstrap(initialState)) + + height++ + block := &types.BlockMeta{ + Header: types.Header{ + Height: height, + AppHash: initialState.AppHash, + LastBlockID: initialState.LastBlockID, + LastResultsHash: initialState.LastResultsHash, + }, + } + blockStore.On("LoadBlockMeta", height).Return(block) + + appVersion++ + newParams.Version.AppVersion = appVersion + nextState := initialState.Copy() + nextState.LastBlockHeight = height + nextState.Version.Consensus.App = appVersion + nextState.LastBlockID = factory.MakeBlockID() + nextState.AppHash = factory.RandomHash() + nextState.LastValidators = initialState.Validators + nextState.Validators = initialState.NextValidators + nextState.NextValidators = initialState.NextValidators.CopyIncrementProposerPriority(1) + nextState.ConsensusParams = *newParams + nextState.LastHeightConsensusParamsChanged = height + 1 + nextState.LastHeightValidatorsChanged = height + 1 + + // update the state + require.NoError(t, stateStore.Save(nextState)) + + // rollback the state + rollbackHeight, rollbackHash, err := state.Rollback(blockStore, stateStore) + require.NoError(t, err) + require.EqualValues(t, int64(100), rollbackHeight) + require.EqualValues(t, initialState.AppHash, rollbackHash) + blockStore.AssertExpectations(t) + + // assert that we've recovered the prior state + loadedState, err := stateStore.Load() + require.NoError(t, err) + require.EqualValues(t, initialState, loadedState) +} + +func TestRollbackNoState(t *testing.T) { + stateStore := state.NewStore(dbm.NewMemDB()) + blockStore := &mocks.BlockStore{} + + 
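+    // With nothing saved in the state store, Rollback should fail before it
+    // ever consults the block store.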
_, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Contains(t, err.Error(), "no state found") +} + +func TestRollbackNoBlocks(t *testing.T) { + stateStore := state.NewStore(dbm.NewMemDB()) + blockStore := &mocks.BlockStore{} + var ( + height int64 = 100 + appVersion uint64 = 10 + ) + + valSet, _ := factory.RandValidatorSet(5, 10) + + params := types.DefaultConsensusParams() + params.Version.AppVersion = appVersion + newParams := types.DefaultConsensusParams() + newParams.Block.MaxBytes = 10000 + + initialState := state.State{ + Version: state.Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: 10, + }, + Software: version.TMVersion, + }, + ChainID: factory.DefaultTestChainID, + InitialHeight: 10, + LastBlockID: factory.MakeBlockID(), + AppHash: factory.RandomHash(), + LastResultsHash: factory.RandomHash(), + LastBlockHeight: height, + LastValidators: valSet, + Validators: valSet.CopyIncrementProposerPriority(1), + NextValidators: valSet.CopyIncrementProposerPriority(2), + LastHeightValidatorsChanged: height + 1, + ConsensusParams: *params, + LastHeightConsensusParamsChanged: height + 1, + } + require.NoError(t, stateStore.Save(initialState)) + blockStore.On("LoadBlockMeta", height).Return(nil) + + _, _, err := state.Rollback(blockStore, stateStore) + require.Error(t, err) + require.Contains(t, err.Error(), "block at height 100 not found") +} diff --git a/state/services.go b/internal/state/services.go similarity index 94% rename from state/services.go rename to internal/state/services.go index c692d0b94..49388cc12 100644 --- a/state/services.go +++ b/internal/state/services.go @@ -9,7 +9,7 @@ import ( // NOTE: Interfaces used by RPC must be thread safe! //------------------------------------------------------ -//go:generate ../scripts/mockery_generate.sh BlockStore +//go:generate ../../scripts/mockery_generate.sh BlockStore //------------------------------------------------------ // blockstore @@ -38,7 +38,7 @@ type BlockStore interface { //----------------------------------------------------------------------------- // evidence pool -//go:generate ../scripts/mockery_generate.sh EvidencePool +//go:generate ../../scripts/mockery_generate.sh EvidencePool // EvidencePool defines the EvidencePool interface used by State. type EvidencePool interface { diff --git a/state/state.go b/internal/state/state.go similarity index 95% rename from state/state.go rename to internal/state/state.go index 132a86fda..1b3c8f16e 100644 --- a/state/state.go +++ b/internal/state/state.go @@ -9,7 +9,6 @@ import ( "github.com/gogo/protobuf/proto" - tmtime "github.com/tendermint/tendermint/libs/time" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmversion "github.com/tendermint/tendermint/proto/tendermint/version" "github.com/tendermint/tendermint/types" @@ -65,6 +64,8 @@ func VersionFromProto(v tmstate.Version) Version { // Instead, use state.Copy() or updateState(...). // NOTE: not goroutine-safe. 
type State struct { + // FIXME: This can be removed as TMVersion is a constant, and version.Consensus should + // eventually be replaced by VersionParams in ConsensusParams Version Version // immutable @@ -194,8 +195,8 @@ func (state *State) ToProto() (*tmstate.State, error) { return sm, nil } -// StateFromProto takes a state proto message & returns the local state type -func StateFromProto(pb *tmstate.State) (*State, error) { //nolint:golint +// FromProto takes a state proto message & returns the local state type +func FromProto(pb *tmstate.State) (*State, error) { if pb == nil { return nil, errors.New("nil State") } @@ -287,7 +288,7 @@ func (state State) MakeBlock( // the votes sent by honest processes, i.e., a faulty processes can not arbitrarily increase or decrease the // computed value. func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { - weightedTimes := make([]*tmtime.WeightedTime, len(commit.Signatures)) + weightedTimes := make([]*weightedTime, len(commit.Signatures)) totalVotingPower := int64(0) for i, commitSig := range commit.Signatures { @@ -298,11 +299,11 @@ func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time // If there's no condition, TestValidateBlockCommit panics; not needed normally. if validator != nil { totalVotingPower += validator.VotingPower - weightedTimes[i] = tmtime.NewWeightedTime(commitSig.Timestamp, validator.VotingPower) + weightedTimes[i] = newWeightedTime(commitSig.Timestamp, validator.VotingPower) } } - return tmtime.WeightedMedian(weightedTimes, totalVotingPower) + return weightedMedian(weightedTimes, totalVotingPower) } //------------------------------------------------------------------------ diff --git a/state/state_test.go b/internal/state/state_test.go similarity index 95% rename from state/state_test.go rename to internal/state/state_test.go index 99d45bb62..8c0144abd 100644 --- a/state/state_test.go +++ b/internal/state/state_test.go @@ -12,36 +12,35 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" + sm "github.com/tendermint/tendermint/internal/state" + statefactory "github.com/tendermint/tendermint/internal/state/test/factory" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - sm "github.com/tendermint/tendermint/state" - sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" ) // setupTestCase does setup common to all test cases. 
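+// It creates a fresh test root and state DB, loads the genesis state from the
+// config, saves it to the store, and returns a teardown func along with the
+// DB and the loaded state.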
func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, sm.State) { - config := cfg.ResetTestRoot("state_") - dbType := dbm.BackendType(config.DBBackend) - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + cfg := config.ResetTestRoot("state_") + dbType := dbm.BackendType(cfg.DBBackend) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) require.NoError(t, err) stateStore := sm.NewStore(stateDB) state, err := stateStore.Load() require.NoError(t, err) require.Empty(t, state) - state, err = sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err = sm.MakeGenesisStateFromFile(cfg.GenesisFile()) assert.NoError(t, err) assert.NotNil(t, state) err = stateStore.Save(state) require.NoError(t, err) - tearDown := func(t *testing.T) { os.RemoveAll(config.RootDir) } + tearDown := func(t *testing.T) { os.RemoveAll(cfg.RootDir) } return tearDown, stateDB, state } @@ -106,7 +105,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ // Build mock responses. - block := sf.MakeBlock(state, 2, new(types.Commit)) + block := statefactory.MakeBlock(state, 2, new(types.Commit)) abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ResponseDeliverTx, 2) @@ -114,7 +113,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil} abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil} - pbpk, err := cryptoenc.PubKeyToProto(ed25519.GenPrivKey().PubKey()) + pbpk, err := encoding.PubKeyToProto(ed25519.GenPrivKey().PubKey()) require.NoError(t, err) abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}}} @@ -448,7 +447,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1 assert.EqualValues(t, 0, val1.ProposerPriority) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, @@ -465,7 +464,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // add a validator val2PubKey := ed25519.GenPrivKey().PubKey() val2VotingPower := int64(100) - fvp, err := cryptoenc.PubKeyToProto(val2PubKey) + fvp, err := encoding.PubKeyToProto(val2PubKey) require.NoError(t, err) updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower} @@ -562,7 +561,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // we only have one validator: assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} // no updates: abciResponses := &tmstate.ABCIResponses{ @@ -583,7 +582,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // add a validator with the same voting power as the first val2PubKey := ed25519.GenPrivKey().PubKey() - fvp, err := cryptoenc.PubKeyToProto(val2PubKey) + fvp, err := encoding.PubKeyToProto(val2PubKey) require.NoError(t, err) updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val1VotingPower} 
validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) @@ -749,7 +748,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -769,7 +768,7 @@ func TestLargeGenesisValidator(t *testing.T) { // see: https://github.com/tendermint/tendermint/issues/2960 firstAddedValPubKey := ed25519.GenPrivKey().PubKey() firstAddedValVotingPower := int64(10) - fvp, err := cryptoenc.PubKeyToProto(firstAddedValPubKey) + fvp, err := encoding.PubKeyToProto(firstAddedValPubKey) require.NoError(t, err) firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) @@ -778,7 +777,7 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, } - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -793,7 +792,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -816,7 +815,7 @@ func TestLargeGenesisValidator(t *testing.T) { // add 10 validators with the same voting power as the one added directly after genesis: for i := 0; i < 10; i++ { addedPubKey := ed25519.GenPrivKey().PubKey() - ap, err := cryptoenc.PubKeyToProto(addedPubKey) + ap, err := encoding.PubKeyToProto(addedPubKey) require.NoError(t, err) addedVal := abci.ValidatorUpdate{PubKey: ap, Power: firstAddedValVotingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) @@ -826,7 +825,7 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, } - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -834,14 +833,14 @@ func TestLargeGenesisValidator(t *testing.T) { require.Equal(t, 
10+2, len(state.NextValidators.Validators)) // remove genesis validator: - gp, err := cryptoenc.PubKeyToProto(genesisPubKey) + gp, err := encoding.PubKeyToProto(genesisPubKey) require.NoError(t, err) removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0} abciResponses = &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, } - block = sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) + block = statefactory.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) @@ -862,7 +861,7 @@ func TestLargeGenesisValidator(t *testing.T) { } validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block = sf.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) + block = statefactory.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -887,7 +886,7 @@ func TestLargeGenesisValidator(t *testing.T) { validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) + block := statefactory.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -982,7 +981,7 @@ func TestStateMakeBlock(t *testing.T) { proposerAddress := state.Validators.GetProposer().Address stateVersion := state.Version.Consensus - block := sf.MakeBlock(state, 2, new(types.Commit)) + block := statefactory.MakeBlock(state, 2, new(types.Commit)) // test we set some fields assert.Equal(t, stateVersion, block.Version) @@ -1080,7 +1079,7 @@ func TestStateProto(t *testing.T) { assert.NoError(t, err, tt.testName) } - smt, err := sm.StateFromProto(pbs) + smt, err := sm.FromProto(pbs) if tt.expPass2 { require.NoError(t, err, tt.testName) require.Equal(t, tt.state, smt, tt.testName) diff --git a/state/store.go b/internal/state/store.go similarity index 99% rename from state/store.go rename to internal/state/store.go index 5ce11e47d..aff165aa1 100644 --- a/state/store.go +++ b/internal/state/store.go @@ -68,7 +68,7 @@ func init() { //---------------------- -//go:generate ../scripts/mockery_generate.sh Store +//go:generate ../../scripts/mockery_generate.sh Store // Store defines the state store interface // @@ -130,7 +130,7 @@ func (store dbStore) loadState(key []byte) (state State, err error) { %v\n`, err)) } - sm, err := StateFromProto(sp) + sm, err := FromProto(sp) if err != nil { return state, err } diff --git a/state/store_test.go b/internal/state/store_test.go similarity index 96% rename from state/store_test.go rename to internal/state/store_test.go index 5d32040b5..118350fff 100644 --- a/state/store_test.go +++ b/internal/state/store_test.go @@ -7,17 +7,16 @@ import ( "github.com/stretchr/testify/assert" 
"github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -102,13 +101,13 @@ func TestStoreLoadValidators(t *testing.T) { func BenchmarkLoadValidators(b *testing.B) { const valSetSize = 100 - config := cfg.ResetTestRoot("state_") - defer os.RemoveAll(config.RootDir) - dbType := dbm.BackendType(config.DBBackend) - stateDB, err := dbm.NewDB("state", dbType, config.DBDir()) + cfg := config.ResetTestRoot("state_") + defer os.RemoveAll(cfg.RootDir) + dbType := dbm.BackendType(cfg.DBBackend) + stateDB, err := dbm.NewDB("state", dbType, cfg.DBDir()) require.NoError(b, err) stateStore := sm.NewStore(stateDB) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) if err != nil { b.Fatal(err) } diff --git a/state/test/factory/block.go b/internal/state/test/factory/block.go similarity index 96% rename from state/test/factory/block.go rename to internal/state/test/factory/block.go index b4eb83fa7..dfcf5ebd9 100644 --- a/state/test/factory/block.go +++ b/internal/state/test/factory/block.go @@ -3,8 +3,8 @@ package factory import ( "time" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/test/factory" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/internal/state/time.go b/internal/state/time.go new file mode 100644 index 000000000..c0770b3af --- /dev/null +++ b/internal/state/time.go @@ -0,0 +1,46 @@ +package state + +import ( + "sort" + "time" +) + +// weightedTime for computing a median. +type weightedTime struct { + Time time.Time + Weight int64 +} + +// newWeightedTime with time and weight. +func newWeightedTime(time time.Time, weight int64) *weightedTime { + return &weightedTime{ + Time: time, + Weight: weight, + } +} + +// weightedMedian computes weighted median time for a given array of WeightedTime and the total voting power. 
+func weightedMedian(weightedTimes []*weightedTime, totalVotingPower int64) (res time.Time) { + median := totalVotingPower / 2 + + sort.Slice(weightedTimes, func(i, j int) bool { + if weightedTimes[i] == nil { + return false + } + if weightedTimes[j] == nil { + return true + } + return weightedTimes[i].Time.UnixNano() < weightedTimes[j].Time.UnixNano() + }) + + for _, weightedTime := range weightedTimes { + if weightedTime != nil { + if median <= weightedTime.Weight { + res = weightedTime.Time + break + } + median -= weightedTime.Weight + } + } + return +} diff --git a/libs/time/time_test.go b/internal/state/time_test.go similarity index 50% rename from libs/time/time_test.go rename to internal/state/time_test.go index 1b1a30e50..893ade7ea 100644 --- a/libs/time/time_test.go +++ b/internal/state/time_test.go @@ -1,54 +1,55 @@ -package time +package state import ( "testing" "time" "github.com/stretchr/testify/assert" + tmtime "github.com/tendermint/tendermint/libs/time" ) func TestWeightedMedian(t *testing.T) { - m := make([]*WeightedTime, 3) + m := make([]*weightedTime, 3) - t1 := Now() + t1 := tmtime.Now() t2 := t1.Add(5 * time.Second) t3 := t1.Add(10 * time.Second) - m[2] = NewWeightedTime(t1, 33) // faulty processes - m[0] = NewWeightedTime(t2, 40) // correct processes - m[1] = NewWeightedTime(t3, 27) // correct processes + m[2] = newWeightedTime(t1, 33) // faulty processes + m[0] = newWeightedTime(t2, 40) // correct processes + m[1] = newWeightedTime(t3, 27) // correct processes totalVotingPower := int64(100) - median := WeightedMedian(m, totalVotingPower) + median := weightedMedian(m, totalVotingPower) assert.Equal(t, t2, median) // median always returns value between values of correct processes assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && (median.Before(t3) || median.Equal(t3))) - m[1] = NewWeightedTime(t1, 40) // correct processes - m[2] = NewWeightedTime(t2, 27) // correct processes - m[0] = NewWeightedTime(t3, 33) // faulty processes + m[1] = newWeightedTime(t1, 40) // correct processes + m[2] = newWeightedTime(t2, 27) // correct processes + m[0] = newWeightedTime(t3, 33) // faulty processes totalVotingPower = int64(100) - median = WeightedMedian(m, totalVotingPower) + median = weightedMedian(m, totalVotingPower) assert.Equal(t, t2, median) // median always returns value between values of correct processes assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && (median.Before(t2) || median.Equal(t2))) - m = make([]*WeightedTime, 8) + m = make([]*weightedTime, 8) t4 := t1.Add(15 * time.Second) t5 := t1.Add(60 * time.Second) - m[3] = NewWeightedTime(t1, 10) // correct processes - m[1] = NewWeightedTime(t2, 10) // correct processes - m[5] = NewWeightedTime(t2, 10) // correct processes - m[4] = NewWeightedTime(t3, 23) // faulty processes - m[0] = NewWeightedTime(t4, 20) // correct processes - m[7] = NewWeightedTime(t5, 10) // faulty processes + m[3] = newWeightedTime(t1, 10) // correct processes + m[1] = newWeightedTime(t2, 10) // correct processes + m[5] = newWeightedTime(t2, 10) // correct processes + m[4] = newWeightedTime(t3, 23) // faulty processes + m[0] = newWeightedTime(t4, 20) // correct processes + m[7] = newWeightedTime(t5, 10) // faulty processes totalVotingPower = int64(83) - median = WeightedMedian(m, totalVotingPower) + median = weightedMedian(m, totalVotingPower) assert.Equal(t, t3, median) // median always returns value between values of correct processes assert.Equal(t, true, (median.After(t1) || median.Equal(t1)) && diff --git 
a/state/tx_filter.go b/internal/state/tx_filter.go similarity index 65% rename from state/tx_filter.go rename to internal/state/tx_filter.go index 61340e135..871e08ae6 100644 --- a/state/tx_filter.go +++ b/internal/state/tx_filter.go @@ -1,22 +1,22 @@ package state import ( - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/types" ) // TxPreCheck returns a function to filter transactions before processing. // The function limits the size of a transaction to the block's maximum data size. -func TxPreCheck(state State) mempl.PreCheckFunc { +func TxPreCheck(state State) mempool.PreCheckFunc { maxDataBytes := types.MaxDataBytesNoEvidence( state.ConsensusParams.Block.MaxBytes, state.Validators.Size(), ) - return mempl.PreCheckMaxBytes(maxDataBytes) + return mempool.PreCheckMaxBytes(maxDataBytes) } // TxPostCheck returns a function to filter transactions after processing. // The function limits the gas wanted by a transaction to the block's maximum total gas. -func TxPostCheck(state State) mempl.PostCheckFunc { - return mempl.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas) +func TxPostCheck(state State) mempool.PostCheckFunc { + return mempool.PostCheckMaxGas(state.ConsensusParams.Block.MaxGas) } diff --git a/state/tx_filter_test.go b/internal/state/tx_filter_test.go similarity index 94% rename from state/tx_filter_test.go rename to internal/state/tx_filter_test.go index d6236fcbf..27af28a40 100644 --- a/state/tx_filter_test.go +++ b/internal/state/tx_filter_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + sm "github.com/tendermint/tendermint/internal/state" tmrand "github.com/tendermint/tendermint/libs/rand" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) diff --git a/state/validation.go b/internal/state/validation.go similarity index 100% rename from state/validation.go rename to internal/state/validation.go diff --git a/state/validation_test.go b/internal/state/validation_test.go similarity index 92% rename from state/validation_test.go rename to internal/state/validation_test.go index 151f2be61..eb0cebbb7 100644 --- a/state/validation_test.go +++ b/internal/state/validation_test.go @@ -8,21 +8,21 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" memmock "github.com/tendermint/tendermint/internal/mempool/mock" - "github.com/tendermint/tendermint/internal/test/factory" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/mocks" + statefactory "github.com/tendermint/tendermint/internal/state/test/factory" + "github.com/tendermint/tendermint/internal/store" + testfactory "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/mocks" - sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const validationTestsStopHeight 
int64 = 10 @@ -90,7 +90,7 @@ func TestValidateBlockHeader(t *testing.T) { Invalid blocks don't pass */ for _, tc := range testCases { - block := sf.MakeBlock(state, height, lastCommit) + block := statefactory.MakeBlock(state, height, lastCommit) tc.malleateBlock(block) err := blockExec.ValidateBlock(state, block) t.Logf("%s: %v", tc.name, err) @@ -107,7 +107,7 @@ func TestValidateBlockHeader(t *testing.T) { } nextHeight := validationTestsStopHeight - block := sf.MakeBlock(state, nextHeight, lastCommit) + block := statefactory.MakeBlock(state, nextHeight, lastCommit) state.InitialHeight = nextHeight + 1 err := blockExec.ValidateBlock(state, block) require.Error(t, err, "expected an error when state is ahead of block") @@ -141,7 +141,7 @@ func TestValidateBlockCommit(t *testing.T) { #2589: ensure state.LastValidators.VerifyCommit fails here */ // should be height-1 instead of height - wrongHeightVote, err := factory.MakeVote( + wrongHeightVote, err := testfactory.MakeVote( privVals[proposerAddr.String()], chainID, 1, @@ -158,7 +158,7 @@ func TestValidateBlockCommit(t *testing.T) { state.LastBlockID, []types.CommitSig{wrongHeightVote.CommitSig()}, ) - block := sf.MakeBlock(state, height, wrongHeightCommit) + block := statefactory.MakeBlock(state, height, wrongHeightCommit) err = blockExec.ValidateBlock(state, block) _, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight) require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) @@ -166,7 +166,7 @@ func TestValidateBlockCommit(t *testing.T) { /* #2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size() */ - block = sf.MakeBlock(state, height, wrongSigsCommit) + block = statefactory.MakeBlock(state, height, wrongSigsCommit) err = blockExec.ValidateBlock(state, block) _, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures) require.True(t, isErrInvalidCommitSignatures, @@ -195,7 +195,7 @@ func TestValidateBlockCommit(t *testing.T) { /* wrongSigsCommit is fine except for the extra bad precommit */ - goodVote, err := factory.MakeVote( + goodVote, err := testfactory.MakeVote( privVals[proposerAddr.String()], chainID, 1, @@ -270,7 +270,7 @@ func TestValidateBlockEvidence(t *testing.T) { A block with too much evidence fails */ evidence := make([]types.Evidence, 0) - var currentBytes int64 = 0 + var currentBytes int64 // more bytes than the maximum allowed for evidence for currentBytes <= maxBytesEvidence { newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), @@ -278,7 +278,7 @@ func TestValidateBlockEvidence(t *testing.T) { evidence = append(evidence, newEv) currentBytes += int64(len(newEv.Bytes())) } - block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) + block, _ := state.MakeBlock(height, testfactory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) err := blockExec.ValidateBlock(state, block) if assert.Error(t, err) { _, ok := err.(*types.ErrEvidenceOverflow) @@ -290,7 +290,7 @@ func TestValidateBlockEvidence(t *testing.T) { A good block with several pieces of good evidence passes */ evidence := make([]types.Evidence, 0) - var currentBytes int64 = 0 + var currentBytes int64 // precisely the amount of allowed evidence for { newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, diff --git a/internal/statesync/block_queue.go b/internal/statesync/block_queue.go index 56ed3c376..80b0ffbd5 100644 --- a/internal/statesync/block_queue.go +++ 
b/internal/statesync/block_queue.go @@ -200,7 +200,7 @@ func (q *blockQueue) retry(height int64) { // Success is called when a light block has been successfully verified and // processed -func (q *blockQueue) success(height int64) { +func (q *blockQueue) success() { q.mtx.Lock() defer q.mtx.Unlock() if q.terminal != nil && q.verifyHeight == q.terminal.Height { diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go index 3a4c71e4e..ad28efac9 100644 --- a/internal/statesync/block_queue_test.go +++ b/internal/statesync/block_queue_test.go @@ -58,7 +58,7 @@ loop: // assert that the queue serializes the blocks require.Equal(t, resp.block.Height, trackingHeight) trackingHeight-- - queue.success(resp.block.Height) + queue.success() } } @@ -105,7 +105,7 @@ func TestBlockQueueWithFailures(t *testing.T) { queue.retry(resp.block.Height) } else { trackingHeight-- - queue.success(resp.block.Height) + queue.success() } case <-queue.done(): @@ -223,7 +223,7 @@ func TestBlockQueueStopTime(t *testing.T) { // assert that the queue serializes the blocks assert.Equal(t, resp.block.Height, trackingHeight) trackingHeight-- - queue.success(resp.block.Height) + queue.success() case <-queue.done(): wg.Wait() @@ -268,14 +268,16 @@ loop: case resp := <-queue.verifyNext(): require.GreaterOrEqual(t, resp.block.Height, initialHeight) - queue.success(resp.block.Height) + queue.success() } } } func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse { + vals, pv := factory.RandValidatorSet(3, 10) + _, _, lb := mockLB(t, height, time, factory.MakeBlockID(), vals, pv) return lightBlockResponse{ - block: mockLB(t, height, time, factory.MakeBlockID()), + block: lb, peer: peer, } } diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index 590f128da..84b6971b8 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -355,3 +355,16 @@ func (q *chunkQueue) WaitFor(index uint32) <-chan uint32 { return ch } + +func (q *chunkQueue) numChunksReturned() int { + q.Lock() + defer q.Unlock() + + cnt := 0 + for _, b := range q.chunkReturned { + if b { + cnt++ + } + } + return cnt +} diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index ad7f19b3b..e17c170bd 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -421,15 +421,7 @@ func TestChunkQueue_Retry(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() - // Allocate and add all chunks to the queue - for i := uint32(0); i < queue.Size(); i++ { - _, err := queue.Allocate() - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) - require.NoError(t, err) - _, err = queue.Next() - require.NoError(t, err) - } + allocateAddChunksToQueue(t, queue) // Retrying a couple of chunks makes Next() return them, but they are not allocatable queue.Retry(3) @@ -454,15 +446,7 @@ func TestChunkQueue_RetryAll(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() - // Allocate and add all chunks to the queue - for i := uint32(0); i < queue.Size(); i++ { - _, err := queue.Allocate() - require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) - require.NoError(t, err) - _, err = queue.Next() - require.NoError(t, err) - } + allocateAddChunksToQueue(t, queue) _, err := queue.Next() assert.Equal(t, errDone, err) @@ -552,3 +536,29 @@ func TestChunkQueue_WaitFor(t *testing.T) { _, ok = 
<-w assert.False(t, ok) } + +func TestNumChunkReturned(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + assert.EqualValues(t, 5, queue.Size()) + + allocateAddChunksToQueue(t, queue) + assert.EqualValues(t, 5, queue.numChunksReturned()) + + err := queue.Close() + require.NoError(t, err) +} + +// Allocate and add all chunks to the queue +func allocateAddChunksToQueue(t *testing.T, q *chunkQueue) { + t.Helper() + for i := uint32(0); i < q.Size(); i++ { + _, err := q.Allocate() + require.NoError(t, err) + _, err = q.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) + require.NoError(t, err) + _, err = q.Next() + require.NoError(t, err) + } +} diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 394b77e38..844cb5e32 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -5,181 +5,90 @@ import ( "errors" "fmt" "sync" - "time" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - proto "github.com/tendermint/tendermint/proto/tendermint/types" + tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" ) var ( errNoConnectedPeers = errors.New("no available peers to dispatch request to") errUnsolicitedResponse = errors.New("unsolicited light block response") - errNoResponse = errors.New("peer failed to respond within timeout") errPeerAlreadyBusy = errors.New("peer is already processing a request") - errDisconnected = errors.New("dispatcher has been disconnected") + errDisconnected = errors.New("dispatcher disconnected") ) -// dispatcher keeps a list of peers and allows concurrent requests for light -// blocks. NOTE: It is not the responsibility of the dispatcher to verify the -// light blocks. -type dispatcher struct { - availablePeers *peerlist - requestCh chan<- p2p.Envelope - timeout time.Duration +// A Dispatcher multiplexes concurrent requests by multiple peers for light blocks. +// Only one request per peer can be sent at a time. Subsequent concurrent requests will +// report an error from the LightBlock method. +// NOTE: It is not the responsibility of the dispatcher to verify the light blocks. 
+type Dispatcher struct { + // the channel used to send light block requests + requestCh chan<- p2p.Envelope + closeCh chan struct{} - mtx sync.Mutex - calls map[types.NodeID]chan *types.LightBlock - running bool + mtx sync.Mutex + // all pending calls that have been dispatched and are awaiting an answer + calls map[types.NodeID]chan *types.LightBlock } -func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispatcher { - return &dispatcher{ - availablePeers: newPeerList(), - timeout: timeout, - requestCh: requestCh, - calls: make(map[types.NodeID]chan *types.LightBlock), - running: true, +func NewDispatcher(requestCh chan<- p2p.Envelope) *Dispatcher { + return &Dispatcher{ + requestCh: requestCh, + closeCh: make(chan struct{}), + calls: make(map[types.NodeID]chan *types.LightBlock), } } -// LightBlock uses the request channel to fetch a light block from the next peer -// in a list, tracks the call and waits for the reactor to pass along the response -func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, types.NodeID, error) { - d.mtx.Lock() - // check to see that the dispatcher is connected to at least one peer - if d.availablePeers.Len() == 0 && len(d.calls) == 0 { - d.mtx.Unlock() - return nil, "", errNoConnectedPeers - } - d.mtx.Unlock() - - // fetch the next peer id in the list and request a light block from that - // peer - peer := d.availablePeers.Pop(ctx) - lb, err := d.lightBlock(ctx, height, peer) - return lb, peer, err -} - -// Providers turns the dispatcher into a set of providers (per peer) which can -// be used by a light client -func (d *dispatcher) Providers(chainID string, timeout time.Duration) []provider.Provider { - d.mtx.Lock() - defer d.mtx.Unlock() - - providers := make([]provider.Provider, d.availablePeers.Len()) - peers := d.availablePeers.Peers() - for index, peer := range peers { - providers[index] = &blockProvider{ - peer: peer, - dispatcher: d, - chainID: chainID, - timeout: timeout, - } - } - return providers -} - -func (d *dispatcher) stop() { - d.mtx.Lock() - defer d.mtx.Unlock() - d.running = false - for peer, call := range d.calls { - close(call) - delete(d.calls, peer) - } -} - -func (d *dispatcher) start() { - d.mtx.Lock() - defer d.mtx.Unlock() - d.running = true -} - -func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer types.NodeID) (*types.LightBlock, error) { +// LightBlock uses the request channel to fetch a light block from a given peer, +// tracking the call and waiting for the reactor to pass back the response. A nil +// LightBlock response is used to signal that the peer doesn't have the requested LightBlock.
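Since the dispatcher no longer owns a timeout (errNoResponse is gone), callers of the LightBlock method defined next must bound the wait themselves, as the backfill loop later in this diff does with a child context. The idiom, restated as a sketch (lightBlockResponseTimeout is the constant declared in reactor.go below):

```go
package statesync

import (
	"context"
	"errors"

	"github.com/tendermint/tendermint/types"
)

// fetchWithTimeout restates the caller-side timeout idiom.
func fetchWithTimeout(ctx context.Context, d *Dispatcher, height int64, peer types.NodeID) (*types.LightBlock, error) {
	subCtx, cancel := context.WithTimeout(ctx, lightBlockResponseTimeout)
	defer cancel()

	lb, err := d.LightBlock(subCtx, height, peer)
	switch {
	case errors.Is(err, context.DeadlineExceeded):
		// the peer didn't answer in time; the caller may retry with another peer
	case err != nil:
		// the dispatcher was closed, or this peer already has a request in flight
	case lb == nil:
		// the peer answered but has no block at this height
	}
	return lb, err
}
```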
+func (d *Dispatcher) LightBlock(ctx context.Context, height int64, peer types.NodeID) (*types.LightBlock, error) { // dispatch the request to the peer callCh, err := d.dispatch(peer, height) if err != nil { return nil, err } + // clean up the call after a response is returned + defer func() { + d.mtx.Lock() + defer d.mtx.Unlock() + if call, ok := d.calls[peer]; ok { + delete(d.calls, peer) + close(call) + } + }() + // wait for a response, cancel or timeout select { case resp := <-callCh: return resp, nil case <-ctx.Done(): - d.release(peer) - return nil, nil + return nil, ctx.Err() - case <-time.After(d.timeout): - d.release(peer) - return nil, errNoResponse - } -} - -// respond allows the underlying process which receives requests on the -// requestCh to respond with the respective light block -func (d *dispatcher) respond(lb *proto.LightBlock, peer types.NodeID) error { - d.mtx.Lock() - defer d.mtx.Unlock() - - // check that the response came from a request - answerCh, ok := d.calls[peer] - if !ok { - // this can also happen if the response came in after the timeout - return errUnsolicitedResponse - } - // release the peer after returning the response - defer d.availablePeers.Append(peer) - defer close(answerCh) - defer delete(d.calls, peer) - - if lb == nil { - answerCh <- nil - return nil - } - - block, err := types.LightBlockFromProto(lb) - if err != nil { - fmt.Println("error with converting light block") - return err - } - - answerCh <- block - return nil -} - -func (d *dispatcher) addPeer(peer types.NodeID) { - d.availablePeers.Append(peer) -} - -func (d *dispatcher) removePeer(peer types.NodeID) { - d.mtx.Lock() - defer d.mtx.Unlock() - if _, ok := d.calls[peer]; ok { - delete(d.calls, peer) - } else { - d.availablePeers.Remove(peer) + case <-d.closeCh: + return nil, errDisconnected } } // dispatch takes a peer and allocates it a channel so long as it's not already // busy and the receiving channel is still running. It then dispatches the message -func (d *dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.LightBlock, error) { +func (d *Dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.LightBlock, error) { d.mtx.Lock() defer d.mtx.Unlock() - ch := make(chan *types.LightBlock, 1) - - // check if the dispatcher is running or not - if !d.running { - close(ch) - return ch, errDisconnected + select { + case <-d.closeCh: + return nil, errDisconnected + default: } - // this should happen only if we add the same peer twice (somehow) + ch := make(chan *types.LightBlock, 1) + + // check if a request for the same peer has already been made if _, ok := d.calls[peer]; ok { close(ch) return ch, errPeerAlreadyBusy @@ -193,47 +102,107 @@ func (d *dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh Height: uint64(height), }, } + return ch, nil } -// release appends the peer back to the list and deletes the allocated call so -// that a new call can be made to that peer -func (d *dispatcher) release(peer types.NodeID) { +// Respond allows the underlying process which receives requests on the +// requestCh to respond with the respective light block. A nil response is used to +// represent that the receiver of the request does not have a light block at that height. 
+func (d *Dispatcher) Respond(lb *tmproto.LightBlock, peer types.NodeID) error { d.mtx.Lock() defer d.mtx.Unlock() - if call, ok := d.calls[peer]; ok { - close(call) - delete(d.calls, peer) + + // check that the response came from a request + answerCh, ok := d.calls[peer] + if !ok { + // this can also happen if the response came in after the timeout + return errUnsolicitedResponse } - d.availablePeers.Append(peer) + + // If lb is nil we take that to mean that the peer didn't have the requested light + // block and thus pass on the nil to the caller. + if lb == nil { + answerCh <- nil + return nil + } + + block, err := types.LightBlockFromProto(lb) + if err != nil { + return err + } + + answerCh <- block + return nil +} + +// Close shuts down the dispatcher and cancels any pending calls awaiting responses. +// Peers awaiting responses that have not arrived are delivered a nil block. +func (d *Dispatcher) Close() { + d.mtx.Lock() + defer d.mtx.Unlock() + close(d.closeCh) + for peer, call := range d.calls { + delete(d.calls, peer) + close(call) + } +} + +func (d *Dispatcher) Done() <-chan struct{} { + return d.closeCh } //---------------------------------------------------------------- -// blockProvider is a p2p based light provider which uses a dispatcher connected +// BlockProvider is a p2p based light provider which uses a dispatcher connected // to the state sync reactor to serve light blocks to the light client // // TODO: This should probably be moved over to the light package but as we're // not yet officially supporting p2p light clients we'll leave this here for now. -type blockProvider struct { +// +// NOTE: BlockProvider will return an error with concurrent calls. However, we don't +// need a mutex because a light client (and the backfill process) will never call a +// method more than once at the same time +type BlockProvider struct { peer types.NodeID chainID string - timeout time.Duration - dispatcher *dispatcher + dispatcher *Dispatcher } -func (p *blockProvider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { - // FIXME: The provider doesn't know if the dispatcher is still connected to - // that peer. If the connection is dropped for whatever reason the - // dispatcher needs to be able to relay this back to the provider so it can - // return ErrConnectionClosed instead of ErrNoResponse - ctx, cancel := context.WithTimeout(ctx, p.timeout) - defer cancel() - lb, _ := p.dispatcher.lightBlock(ctx, height, p.peer) - if lb == nil { - return nil, provider.ErrNoResponse +// Creates a block provider which implements the light client Provider interface. +func NewBlockProvider(peer types.NodeID, chainID string, dispatcher *Dispatcher) *BlockProvider { + return &BlockProvider{ + peer: peer, + chainID: chainID, + dispatcher: dispatcher, + } +} + +// LightBlock fetches a light block from the peer at a specified height returning either a +// light block or an appropriate error. 
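NewBlockProvider above adapts a single peer into a light-client provider; building one per connected peer, as initStateProvider does further down this diff, looks like this sketch (the helper name is illustrative):

```go
package statesync

import (
	"github.com/tendermint/tendermint/light/provider"
	"github.com/tendermint/tendermint/types"
)

// providersFromPeers adapts each connected peer into a light client provider,
// mirroring the loop in initStateProvider in reactor.go.
func providersFromPeers(peers []types.NodeID, chainID string, d *Dispatcher) []provider.Provider {
	providers := make([]provider.Provider, len(peers))
	for i, peer := range peers {
		// each BlockProvider serves light blocks from exactly one peer
		providers[i] = NewBlockProvider(peer, chainID, d)
	}
	return providers
}
```

The light client can then cross-reference blocks across these providers, which is why the reactor waits for at least two peers before starting a sync.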
+func (p *BlockProvider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { + lb, err := p.dispatcher.LightBlock(ctx, height, p.peer) + switch err { + case nil: + if lb == nil { + return nil, provider.ErrLightBlockNotFound + } + case context.DeadlineExceeded, context.Canceled: + return nil, err + case errPeerAlreadyBusy: + return nil, provider.ErrLightBlockNotFound + default: + return nil, provider.ErrUnreliableProvider{Reason: err.Error()} } + // check that the height requested is the same one returned + if lb.Height != height { + return nil, provider.ErrBadLightBlock{ + Reason: fmt.Errorf("expected height %d, got height %d", height, lb.Height), + } + } + + // perform basic validation if err := lb.ValidateBasic(p.chainID); err != nil { return nil, provider.ErrBadLightBlock{Reason: err} } @@ -245,37 +214,37 @@ func (p *blockProvider) LightBlock(ctx context.Context, height int64) (*types.Li // attacks. This is a no op as there currently isn't a way to wire this up to // the evidence reactor (we should endeavor to do this in the future but for now // it's not critical for backwards verification) -func (p *blockProvider) ReportEvidence(ctx context.Context, ev types.Evidence) error { +func (p *BlockProvider) ReportEvidence(ctx context.Context, ev types.Evidence) error { return nil } // String implements stringer interface -func (p *blockProvider) String() string { return string(p.peer) } +func (p *BlockProvider) String() string { return string(p.peer) } //---------------------------------------------------------------- // peerList is a rolling list of peers. This is used to distribute the load of // retrieving blocks over all the peers the reactor is connected to -type peerlist struct { +type peerList struct { mtx sync.Mutex peers []types.NodeID waiting []chan types.NodeID } -func newPeerList() *peerlist { - return &peerlist{ +func newPeerList() *peerList { + return &peerList{ peers: make([]types.NodeID, 0), waiting: make([]chan types.NodeID, 0), } } -func (l *peerlist) Len() int { +func (l *peerList) Len() int { l.mtx.Lock() defer l.mtx.Unlock() return len(l.peers) } -func (l *peerlist) Pop(ctx context.Context) types.NodeID { +func (l *peerList) Pop(ctx context.Context) types.NodeID { l.mtx.Lock() if len(l.peers) == 0 { // if we don't have any peers in the list we block until a peer is @@ -299,7 +268,7 @@ func (l *peerlist) Pop(ctx context.Context) types.NodeID { return peer } -func (l *peerlist) Append(peer types.NodeID) { +func (l *peerList) Append(peer types.NodeID) { l.mtx.Lock() defer l.mtx.Unlock() if len(l.waiting) > 0 { @@ -312,7 +281,7 @@ func (l *peerlist) Append(peer types.NodeID) { } } -func (l *peerlist) Remove(peer types.NodeID) { +func (l *peerList) Remove(peer types.NodeID) { l.mtx.Lock() defer l.mtx.Unlock() for i, p := range l.peers { @@ -323,8 +292,21 @@ func (l *peerlist) Remove(peer types.NodeID) { } } -func (l *peerlist) Peers() []types.NodeID { +func (l *peerList) All() []types.NodeID { l.mtx.Lock() defer l.mtx.Unlock() return l.peers } + +func (l *peerList) Contains(id types.NodeID) bool { + l.mtx.Lock() + defer l.mtx.Unlock() + + for _, p := range l.peers { + if id == p { + return true + } + } + + return false +} diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index 469630894..e5a6a85cd 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -13,145 +13,102 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/internal/p2p" + 
"github.com/tendermint/tendermint/internal/test/factory" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" "github.com/tendermint/tendermint/types" ) func TestDispatcherBasic(t *testing.T) { t.Cleanup(leaktest.Check(t)) + const numPeers = 5 ch := make(chan p2p.Envelope, 100) closeCh := make(chan struct{}) defer close(closeCh) - d := newDispatcher(ch, 1*time.Second) - + d := NewDispatcher(ch) go handleRequests(t, d, ch, closeCh) - peers := createPeerSet(5) - for _, peer := range peers { - d.addPeer(peer) - } - + peers := createPeerSet(numPeers) wg := sync.WaitGroup{} // make a bunch of async requests and require that the correct responses are // given - for i := 1; i < 10; i++ { + for i := 0; i < numPeers; i++ { wg.Add(1) go func(height int64) { defer wg.Done() - lb, peer, err := d.LightBlock(context.Background(), height) + lb, err := d.LightBlock(context.Background(), height, peers[height-1]) require.NoError(t, err) require.NotNil(t, lb) require.Equal(t, lb.Height, height) - require.Contains(t, peers, peer) - }(int64(i)) + }(int64(i + 1)) } wg.Wait() + + // assert that all calls were responded to + assert.Empty(t, d.calls) } func TestDispatcherReturnsNoBlock(t *testing.T) { t.Cleanup(leaktest.Check(t)) ch := make(chan p2p.Envelope, 100) - d := newDispatcher(ch, 1*time.Second) - peerFromSet := createPeerSet(1)[0] - d.addPeer(peerFromSet) + d := NewDispatcher(ch) doneCh := make(chan struct{}) + peer := factory.NodeID("a") go func() { <-ch - require.NoError(t, d.respond(nil, peerFromSet)) + require.NoError(t, d.Respond(nil, peer)) close(doneCh) }() - lb, peerResult, err := d.LightBlock(context.Background(), 1) + lb, err := d.LightBlock(context.Background(), 1, peer) <-doneCh require.Nil(t, lb) require.Nil(t, err) - require.Equal(t, peerFromSet, peerResult) } -func TestDispatcherErrorsWhenNoPeers(t *testing.T) { +func TestDispatcherTimeOutWaitingOnLightBlock(t *testing.T) { t.Cleanup(leaktest.Check(t)) ch := make(chan p2p.Envelope, 100) - d := newDispatcher(ch, 1*time.Second) + d := NewDispatcher(ch) + peer := factory.NodeID("a") - lb, peerResult, err := d.LightBlock(context.Background(), 1) + ctx, cancelFunc := context.WithTimeout(context.Background(), 10*time.Millisecond) + defer cancelFunc() + lb, err := d.LightBlock(ctx, 1, peer) + + require.Error(t, err) + require.Equal(t, context.DeadlineExceeded, err) require.Nil(t, lb) - require.Empty(t, peerResult) - require.Equal(t, errNoConnectedPeers, err) -} - -func TestDispatcherReturnsBlockOncePeerAvailable(t *testing.T) { - t.Cleanup(leaktest.Check(t)) - dispatcherRequestCh := make(chan p2p.Envelope, 100) - d := newDispatcher(dispatcherRequestCh, 1*time.Second) - peerFromSet := createPeerSet(1)[0] - d.addPeer(peerFromSet) - ctx := context.Background() - wrapped, cancelFunc := context.WithCancel(ctx) - - doneCh := make(chan struct{}) - go func() { - lb, peerResult, err := d.LightBlock(wrapped, 1) - require.Nil(t, lb) - require.Equal(t, peerFromSet, peerResult) - require.Nil(t, err) - - // calls to dispatcher.Lightblock write into the dispatcher's requestCh. - // we read from the requestCh here to unblock the requestCh for future - // calls. 
- <-dispatcherRequestCh - close(doneCh) - }() - cancelFunc() - <-doneCh - - go func() { - <-dispatcherRequestCh - lb := &types.LightBlock{} - asProto, err := lb.ToProto() - require.Nil(t, err) - err = d.respond(asProto, peerFromSet) - require.Nil(t, err) - }() - - lb, peerResult, err := d.LightBlock(context.Background(), 1) - - require.NotNil(t, lb) - require.Equal(t, peerFromSet, peerResult) - require.Nil(t, err) } func TestDispatcherProviders(t *testing.T) { t.Cleanup(leaktest.Check(t)) ch := make(chan p2p.Envelope, 100) - chainID := "state-sync-test" + chainID := "test-chain" closeCh := make(chan struct{}) defer close(closeCh) - d := newDispatcher(ch, 1*time.Second) - + d := NewDispatcher(ch) go handleRequests(t, d, ch, closeCh) peers := createPeerSet(5) - for _, peer := range peers { - d.addPeer(peer) + providers := make([]*BlockProvider, len(peers)) + for idx, peer := range peers { + providers[idx] = NewBlockProvider(peer, chainID, d) } - - providers := d.Providers(chainID, 5*time.Second) require.Len(t, providers, 5) + for i, p := range providers { - bp, ok := p.(*blockProvider) - require.True(t, ok) - assert.Equal(t, bp.String(), string(peers[i])) + assert.Equal(t, string(peers[i]), p.String(), i) lb, err := p.LightBlock(context.Background(), 10) - assert.Error(t, err) - assert.Nil(t, lb) + assert.NoError(t, err) + assert.NotNil(t, lb) } } @@ -166,7 +123,7 @@ func TestPeerListBasic(t *testing.T) { peerList.Append(peer) } - for idx, peer := range peerList.Peers() { + for idx, peer := range peerList.All() { assert.Equal(t, peer, peerSet[idx]) } @@ -178,13 +135,22 @@ func TestPeerListBasic(t *testing.T) { } assert.Equal(t, half, peerList.Len()) + // removing a peer that doesn't exist should not change the list peerList.Remove(types.NodeID("lp")) assert.Equal(t, half, peerList.Len()) + // removing a peer that exists should decrease the list size by one peerList.Remove(peerSet[half]) - half++ - assert.Equal(t, peerSet[half], peerList.Pop(ctx)) + assert.Equal(t, numPeers-half-1, peerList.Len()) + // popping the next peer should work as expected + assert.Equal(t, peerSet[half+1], peerList.Pop(ctx)) + assert.Equal(t, numPeers-half-2, peerList.Len()) + + // append the two peers back + peerList.Append(peerSet[half]) + peerList.Append(peerSet[half+1]) + assert.Equal(t, half, peerList.Len()) } func TestPeerListBlocksWhenEmpty(t *testing.T) { @@ -277,9 +243,28 @@ func TestPeerListConcurrent(t *testing.T) { } } +func TestPeerListRemove(t *testing.T) { + peerList := newPeerList() + numPeers := 10 + + peerSet := createPeerSet(numPeers) + for _, peer := range peerSet { + peerList.Append(peer) + } + + for _, peer := range peerSet { + peerList.Remove(peer) + for _, p := range peerList.All() { + require.NotEqual(t, p, peer) + } + numPeers-- + require.Equal(t, numPeers, peerList.Len()) + } +} + // handleRequests is a helper function usually run in a separate go routine to // imitate the expected responses of the reactor wired to the dispatcher -func handleRequests(t *testing.T, d *dispatcher, ch chan p2p.Envelope, closeCh chan struct{}) { +func handleRequests(t *testing.T, d *Dispatcher, ch chan p2p.Envelope, closeCh chan struct{}) { t.Helper() for { select { @@ -288,7 +273,7 @@ func handleRequests(t *testing.T, d *dispatcher, ch chan p2p.Envelope, closeCh c peer := request.To resp := mockLBResp(t, peer, int64(height), time.Now()) block, _ := resp.block.ToProto() - require.NoError(t, d.respond(block, resp.peer)) + require.NoError(t, d.Respond(block, resp.peer)) case <-closeCh: return } diff --git 
a/internal/statesync/metrics.go b/internal/statesync/metrics.go new file mode 100644 index 000000000..fb134f580 --- /dev/null +++ b/internal/statesync/metrics.go @@ -0,0 +1,91 @@ +package statesync + +import ( + "github.com/go-kit/kit/metrics" + "github.com/go-kit/kit/metrics/discard" + "github.com/go-kit/kit/metrics/prometheus" + stdprometheus "github.com/prometheus/client_golang/prometheus" +) + +const ( + // MetricsSubsystem is a subsystem shared by all metrics exposed by this package. + MetricsSubsystem = "statesync" +) + +// Metrics contains metrics exposed by this package. +type Metrics struct { + TotalSnapshots metrics.Counter + ChunkProcessAvgTime metrics.Gauge + SnapshotHeight metrics.Gauge + SnapshotChunk metrics.Counter + SnapshotChunkTotal metrics.Gauge + BackFilledBlocks metrics.Counter + BackFillBlocksTotal metrics.Gauge +} + +// PrometheusMetrics returns Metrics built using the Prometheus client library. +// Optionally, labels can be provided along with their values ("foo", +// "fooValue"). +func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { + labels := []string{} + for i := 0; i < len(labelsAndValues); i += 2 { + labels = append(labels, labelsAndValues[i]) + } + return &Metrics{ + TotalSnapshots: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "total_snapshots", + Help: "The total number of snapshots discovered.", + }, labels).With(labelsAndValues...), + ChunkProcessAvgTime: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "chunk_process_avg_time", + Help: "The average processing time per chunk.", + }, labels).With(labelsAndValues...), + SnapshotHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_height", + Help: "The height of the current snapshot that has been processed.", + }, labels).With(labelsAndValues...), + SnapshotChunk: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_chunk", + Help: "The current number of chunks that have been processed.", + }, labels).With(labelsAndValues...), + SnapshotChunkTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "snapshot_chunks_total", + Help: "The total number of chunks in the current snapshot.", + }, labels).With(labelsAndValues...), + BackFilledBlocks: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "backfilled_blocks", + Help: "The current number of blocks that have been back-filled.", + }, labels).With(labelsAndValues...), + BackFillBlocksTotal: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "backfilled_blocks_total", + Help: "The total number of blocks that need to be back-filled.", + }, labels).With(labelsAndValues...), + } +} + +// NopMetrics returns no-op Metrics.
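PrometheusMetrics takes label names from the even-indexed entries of labelsAndValues and curries the values onto every metric via With. A usage sketch; the "tendermint" namespace and the "chain_id" label are assumptions for illustration, not fixed by this diff:

```go
package statesync

// newStateSyncMetrics is an illustrative constructor.
func newStateSyncMetrics(chainID string) *Metrics {
	// label names are read from the even positions of labelsAndValues and
	// the corresponding values are curried onto every metric via With(...)
	return PrometheusMetrics("tendermint", "chain_id", chainID)
}

// recordBackfillProgress shows the intended counter/gauge split.
func recordBackfillProgress(m *Metrics, totalBlocks int64) {
	m.BackFillBlocksTotal.Set(float64(totalBlocks)) // gauge: blocks still expected
	m.BackFilledBlocks.Add(1)                       // counter: one more block stored
}
```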
+func NopMetrics() *Metrics { + return &Metrics{ + TotalSnapshots: discard.NewCounter(), + ChunkProcessAvgTime: discard.NewGauge(), + SnapshotHeight: discard.NewGauge(), + SnapshotChunk: discard.NewCounter(), + SnapshotChunkTotal: discard.NewGauge(), + BackFilledBlocks: discard.NewCounter(), + BackFillBlocksTotal: discard.NewGauge(), + } +} diff --git a/internal/statesync/mock_sync_reactor.go b/internal/statesync/mock_sync_reactor.go deleted file mode 100644 index 6688ce4d2..000000000 --- a/internal/statesync/mock_sync_reactor.go +++ /dev/null @@ -1,50 +0,0 @@ -package statesync - -import ( - "context" - "time" - - mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" -) - -// MockSyncReactor is an autogenerated mock type for the SyncReactor type. -// Because of the stateprovider uses in Sync(), we use package statesync instead of mocks. -type MockSyncReactor struct { - mock.Mock -} - -// Backfill provides a mock function with given fields: _a0 -func (_m *MockSyncReactor) Backfill(_a0 state.State) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(state.State) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Sync provides a mock function with given fields: _a0, _a1, _a2 -func (_m *MockSyncReactor) Sync(_a0 context.Context, _a1 StateProvider, _a2 time.Duration) (state.State, error) { - ret := _m.Called(_a0, _a1, _a2) - - var r0 state.State - if rf, ok := ret.Get(0).(func(context.Context, StateProvider, time.Duration) state.State); ok { - r0 = rf(_a0, _a1, _a2) - } else { - r0 = ret.Get(0).(state.State) - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, StateProvider, time.Duration) error); ok { - r1 = rf(_a0, _a1, _a2) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/internal/statesync/mocks/Metricer.go b/internal/statesync/mocks/Metricer.go new file mode 100644 index 000000000..c4721b304 --- /dev/null +++ b/internal/statesync/mocks/Metricer.go @@ -0,0 +1,112 @@ +// Code generated by mockery 2.9.4. DO NOT EDIT. 
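The generated mock that follows lets tests stub the Metricer interface without a running reactor. A hypothetical test using the standard testify pattern (every stubbed method must be exercised for AssertExpectations to pass):

```go
package statesync_test

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/internal/statesync/mocks"
)

// TestSyncInfoFromMetricer is an illustrative test, not one added by this diff.
func TestSyncInfoFromMetricer(t *testing.T) {
	m := new(mocks.Metricer)
	m.On("TotalSnapshots").Return(int64(3))
	m.On("SnapshotHeight").Return(int64(1200))
	m.On("ChunkProcessAvgTime").Return(50 * time.Millisecond)

	// the code under test would accept the Metricer interface; call it directly here
	require.Equal(t, int64(3), m.TotalSnapshots())
	require.Equal(t, int64(1200), m.SnapshotHeight())
	require.Equal(t, 50*time.Millisecond, m.ChunkProcessAvgTime())
	m.AssertExpectations(t)
}
```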
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + time "time" +) + +// Metricer is an autogenerated mock type for the Metricer type +type Metricer struct { + mock.Mock +} + +// BackFillBlocksTotal provides a mock function with given fields: +func (_m *Metricer) BackFillBlocksTotal() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// BackFilledBlocks provides a mock function with given fields: +func (_m *Metricer) BackFilledBlocks() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// ChunkProcessAvgTime provides a mock function with given fields: +func (_m *Metricer) ChunkProcessAvgTime() time.Duration { + ret := _m.Called() + + var r0 time.Duration + if rf, ok := ret.Get(0).(func() time.Duration); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(time.Duration) + } + + return r0 +} + +// SnapshotChunksCount provides a mock function with given fields: +func (_m *Metricer) SnapshotChunksCount() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// SnapshotChunksTotal provides a mock function with given fields: +func (_m *Metricer) SnapshotChunksTotal() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// SnapshotHeight provides a mock function with given fields: +func (_m *Metricer) SnapshotHeight() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} + +// TotalSnapshots provides a mock function with given fields: +func (_m *Metricer) TotalSnapshots() int64 { + ret := _m.Called() + + var r0 int64 + if rf, ok := ret.Get(0).(func() int64); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(int64) + } + + return r0 +} diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 538c619fc..b8d681631 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -6,7 +6,7 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + state "github.com/tendermint/tendermint/internal/state" types "github.com/tendermint/tendermint/types" ) diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 59cbabd14..939fb409c 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -14,61 +14,20 @@ import ( "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/light" + "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/store" 
"github.com/tendermint/tendermint/types" ) var ( _ service.Service = (*Reactor)(nil) _ p2p.Wrapper = (*ssproto.Message)(nil) - - // ChannelShims contains a map of ChannelDescriptorShim objects, where each - // object wraps a reference to a legacy p2p ChannelDescriptor and the corresponding - // p2p proto.Message the new p2p Channel is responsible for handling. - // - // - // TODO: Remove once p2p refactor is complete. - // ref: https://github.com/tendermint/tendermint/issues/5670 - ChannelShims = map[p2p.ChannelID]*p2p.ChannelDescriptorShim{ - SnapshotChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(SnapshotChannel), - Priority: 6, - SendQueueCapacity: 10, - RecvMessageCapacity: snapshotMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - ChunkChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(ChunkChannel), - Priority: 3, - SendQueueCapacity: 4, - RecvMessageCapacity: chunkMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - LightBlockChannel: { - MsgType: new(ssproto.Message), - Descriptor: &p2p.ChannelDescriptor{ - ID: byte(LightBlockChannel), - Priority: 2, - SendQueueCapacity: 10, - RecvMessageCapacity: lightBlockMsgSize, - RecvBufferCapacity: 128, - MaxSendBytes: 400, - }, - }, - } ) const ( @@ -81,6 +40,9 @@ const ( // LightBlockChannel exchanges light blocks LightBlockChannel = p2p.ChannelID(0x62) + // ParamsChannel exchanges consensus params + ParamsChannel = p2p.ChannelID(0x63) + // recentSnapshots is the number of recent snapshots to send and receive per peer. recentSnapshots = 10 @@ -91,21 +53,73 @@ const ( chunkMsgSize = int(16e6) // ~16MB // lightBlockMsgSize is the maximum size of a lightBlockResponseMessage - lightBlockMsgSize = int(1e7) // ~10MB + lightBlockMsgSize = int(1e7) // ~1MB + + // paramMsgSize is the maximum size of a paramsResponseMessage + paramMsgSize = int(1e5) // ~100kb // lightBlockResponseTimeout is how long the dispatcher waits for a peer to // return a light block - lightBlockResponseTimeout = 30 * time.Second + lightBlockResponseTimeout = 10 * time.Second + + // consensusParamsResponseTimeout is the time the p2p state provider waits + // before performing a secondary call + consensusParamsResponseTimeout = 5 * time.Second // maxLightBlockRequestRetries is the amount of retries acceptable before // the backfill process aborts maxLightBlockRequestRetries = 20 ) -// SyncReactor defines an interface used for testing abilities of node.startStateSync. 
-type SyncReactor interface { - Sync(context.Context, StateProvider, time.Duration) (sm.State, error) - Backfill(sm.State) error +func GetChannelDescriptors() []*p2p.ChannelDescriptor { + return []*p2p.ChannelDescriptor{ + { + + ID: SnapshotChannel, + MessageType: new(ssproto.Message), + Priority: 6, + SendQueueCapacity: 10, + RecvMessageCapacity: snapshotMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: ChunkChannel, + Priority: 3, + MessageType: new(ssproto.Message), + SendQueueCapacity: 4, + RecvMessageCapacity: chunkMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: LightBlockChannel, + MessageType: new(ssproto.Message), + Priority: 5, + SendQueueCapacity: 10, + RecvMessageCapacity: lightBlockMsgSize, + RecvBufferCapacity: 128, + }, + { + ID: ParamsChannel, + MessageType: new(ssproto.Message), + Priority: 2, + SendQueueCapacity: 10, + RecvMessageCapacity: paramMsgSize, + RecvBufferCapacity: 128, + }, + } + +} + +// Metricer defines an interface used for the rpc sync info query, please see statesync.metrics +// for the details. +type Metricer interface { + TotalSnapshots() int64 + ChunkProcessAvgTime() time.Duration + SnapshotHeight() int64 + SnapshotChunksCount() int64 + SnapshotChunksTotal() int64 + BackFilledBlocks() int64 + BackFillBlocksTotal() int64 } // Reactor handles state sync, both restoring snapshots for the local node and @@ -113,9 +127,11 @@ type SyncReactor interface { type Reactor struct { service.BaseService - cfg config.StateSyncConfig - stateStore sm.Store - blockStore *store.BlockStore + chainID string + initialHeight int64 + cfg config.StateSyncConfig + stateStore sm.Store + blockStore *store.BlockStore conn proxy.AppConnSnapshot connQuery proxy.AppConnQuery @@ -123,15 +139,26 @@ type Reactor struct { snapshotCh *p2p.Channel chunkCh *p2p.Channel blockCh *p2p.Channel + paramsCh *p2p.Channel peerUpdates *p2p.PeerUpdates closeCh chan struct{} - dispatcher *dispatcher + // Dispatcher is used to multiplex light block requests and responses over multiple + // peers used by the p2p state provider and in reverse sync. + dispatcher *Dispatcher + peers *peerList - // This will only be set when a state sync is in progress. It is used to feed - // received snapshots and chunks into the sync. - mtx tmsync.RWMutex - syncer *syncer + // These will only be set when a state sync is in progress. It is used to feed + // received snapshots and chunks into the syncer and manage incoming and outgoing + // providers. + mtx tmsync.RWMutex + syncer *syncer + providers map[types.NodeID]*BlockProvider + stateProvider StateProvider + + metrics *Metrics + backfillBlockTotal int64 + backfilledBlocks int64 } // NewReactor returns a reference to a new state sync reactor, which implements @@ -139,29 +166,38 @@ type Reactor struct { // and querying, references to p2p Channels and a channel to listen for peer // updates on. Note, the reactor will close all p2p Channels when stopping. 
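A wiring sketch for the enlarged constructor that follows, under stated assumptions: openChannel is a hypothetical stand-in for however the node package turns the descriptors from GetChannelDescriptors above into live p2p Channels, and the remaining variable names (genDoc, connSnapshot, tempDir, and so on) are illustrative:

```go
descs := GetChannelDescriptors()
snapshotCh, chunkCh := openChannel(descs[0]), openChannel(descs[1])
blockCh, paramsCh := openChannel(descs[2]), openChannel(descs[3])

r := NewReactor(
	genDoc.ChainID, genDoc.InitialHeight,
	cfg.StateSync, logger,
	connSnapshot, connQuery,
	snapshotCh, chunkCh, blockCh, paramsCh,
	peerUpdates,
	stateStore, blockStore,
	tempDir,
	NopMetrics(), // or PrometheusMetrics(...) when instrumentation is enabled
)
```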
func NewReactor( + chainID string, + initialHeight int64, cfg config.StateSyncConfig, logger log.Logger, conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, - snapshotCh, chunkCh, blockCh *p2p.Channel, + snapshotCh, chunkCh, blockCh, paramsCh *p2p.Channel, peerUpdates *p2p.PeerUpdates, stateStore sm.Store, blockStore *store.BlockStore, tempDir string, + ssMetrics *Metrics, ) *Reactor { r := &Reactor{ - cfg: cfg, - conn: conn, - connQuery: connQuery, - snapshotCh: snapshotCh, - chunkCh: chunkCh, - blockCh: blockCh, - peerUpdates: peerUpdates, - closeCh: make(chan struct{}), - tempDir: tempDir, - dispatcher: newDispatcher(blockCh.Out, lightBlockResponseTimeout), - stateStore: stateStore, - blockStore: blockStore, + chainID: chainID, + initialHeight: initialHeight, + cfg: cfg, + conn: conn, + connQuery: connQuery, + snapshotCh: snapshotCh, + chunkCh: chunkCh, + blockCh: blockCh, + paramsCh: paramsCh, + peerUpdates: peerUpdates, + closeCh: make(chan struct{}), + tempDir: tempDir, + stateStore: stateStore, + blockStore: blockStore, + peers: newPeerList(), + dispatcher: NewDispatcher(blockCh.Out), + providers: make(map[types.NodeID]*BlockProvider), + metrics: ssMetrics, } r.BaseService = *service.NewBaseService(logger, "StateSync", r) @@ -170,26 +206,20 @@ func NewReactor( // OnStart starts separate go routines for each p2p Channel and listens for // envelopes on each. In addition, it also listens for peer updates and handles -// messages on that p2p channel accordingly. The caller must be sure to execute -// OnStop to ensure the outbound p2p Channels are closed. No error is returned. +// messages on that p2p channel accordingly. Note, we do not launch a go-routine to +// handle individual envelopes as to not have to deal with bounding workers or pools. +// The caller must be sure to execute OnStop to ensure the outbound p2p Channels are +// closed. No error is returned. func (r *Reactor) OnStart() error { - // Listen for envelopes on the snapshot p2p Channel in a separate go-routine - // as to not block or cause IO contention with the chunk p2p Channel. Note, - // we do not launch a go-routine to handle individual envelopes as to not - // have to deal with bounding workers or pools. go r.processSnapshotCh() - // Listen for envelopes on the chunk p2p Channel in a separate go-routine - // as to not block or cause IO contention with the snapshot p2p Channel. Note, - // we do not launch a go-routine to handle individual envelopes as to not - // have to deal with bounding workers or pools. go r.processChunkCh() go r.processBlockCh() - go r.processPeerUpdates() + go r.processParamsCh() - r.dispatcher.start() + go r.processPeerUpdates() return nil } @@ -198,7 +228,9 @@ func (r *Reactor) OnStart() error { // blocking until they all exit. func (r *Reactor) OnStop() { // tell the dispatcher to stop sending any more requests - r.dispatcher.stop() + r.dispatcher.Close() + // wait for any remaining requests to complete + <-r.dispatcher.Done() // Close closeCh to signal to all spawned goroutines to gracefully exit. All // p2p Channels should execute Close(). @@ -207,30 +239,34 @@ func (r *Reactor) OnStop() { // Wait for all p2p Channels to be closed before returning. This ensures we // can easily reason about synchronization of all p2p Channels and ensure no // panics will occur. + <-r.peerUpdates.Done() <-r.snapshotCh.Done() <-r.chunkCh.Done() <-r.blockCh.Done() - <-r.peerUpdates.Done() + <-r.paramsCh.Done() } // Sync runs a state sync, fetching snapshots and providing chunks to the -// application. 
It also saves tendermint state and runs a backfill process to -// retrieve the necessary amount of headers, commits and validators sets to be -// able to process evidence and participate in consensus. -func (r *Reactor) Sync( - ctx context.Context, - stateProvider StateProvider, - discoveryTime time.Duration, -) (sm.State, error) { +// application. At the close of the operation, Sync will bootstrap the state +// store and persist the commit at that height so that either consensus or +// blocksync can commence. It will then proceed to backfill the necessary amount +// of historical blocks before participating in consensus +func (r *Reactor) Sync(ctx context.Context) (sm.State, error) { + // We need at least two peers (for cross-referencing of light blocks) before we can + // begin state sync + if err := r.waitForEnoughPeers(ctx, 2); err != nil { + return sm.State{}, err + } + r.mtx.Lock() if r.syncer != nil { r.mtx.Unlock() return sm.State{}, errors.New("a state sync is already in progress") } - if stateProvider == nil { + if err := r.initStateProvider(ctx, r.chainID, r.initialHeight); err != nil { r.mtx.Unlock() - return sm.State{}, errors.New("the stateProvider should not be nil when doing the state sync") + return sm.State{}, err } r.syncer = newSyncer( @@ -238,30 +274,41 @@ func (r *Reactor) Sync( r.Logger, r.conn, r.connQuery, - stateProvider, + r.stateProvider, r.snapshotCh.Out, r.chunkCh.Out, + r.snapshotCh.Done(), r.tempDir, + r.metrics, ) r.mtx.Unlock() + defer func() { + r.mtx.Lock() + // reset syncing objects at the close of Sync + r.syncer = nil + r.stateProvider = nil + r.mtx.Unlock() + }() requestSnapshotsHook := func() { // request snapshots from all currently connected peers - r.snapshotCh.Out <- p2p.Envelope{ + msg := p2p.Envelope{ Broadcast: true, Message: &ssproto.SnapshotsRequest{}, } + + select { + case <-ctx.Done(): + case <-r.closeCh: + case r.snapshotCh.Out <- msg: + } } - state, commit, err := r.syncer.SyncAny(ctx, discoveryTime, requestSnapshotsHook) + state, commit, err := r.syncer.SyncAny(ctx, r.cfg.DiscoveryTime, requestSnapshotsHook) if err != nil { return sm.State{}, err } - r.mtx.Lock() - r.syncer = nil - r.mtx.Unlock() - err = r.stateStore.Bootstrap(state) if err != nil { return sm.State{}, fmt.Errorf("failed to bootstrap node with new state: %w", err) @@ -272,6 +319,11 @@ func (r *Reactor) Sync( return sm.State{}, fmt.Errorf("failed to store last seen commit: %w", err) } + err = r.Backfill(ctx, state) + if err != nil { + r.Logger.Error("backfill failed. Proceeding optimistically...", "err", err) + } + return state, nil } @@ -279,7 +331,7 @@ func (r *Reactor) Sync( // order. It does not stop verifying blocks until reaching a block with a height // and time that is less or equal to the stopHeight and stopTime. The // trustedBlockID should be of the header at startHeight. 
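The evidence parameters determine how far back Backfill must reach: the node needs every block that could still be referenced by valid evidence, so blocks are kept while they fall within MaxAgeNumBlocks *or* MaxAgeDuration of the synced height. The window computation, restated from the body of Backfill as a standalone sketch (the clamp branch paraphrases code partially elided from this hunk):

```go
package statesync

import (
	"time"

	sm "github.com/tendermint/tendermint/internal/state"
)

// backfillWindow restates the stop conditions computed inside Backfill.
func backfillWindow(state sm.State) (stopHeight int64, stopTime time.Time) {
	params := state.ConsensusParams.Evidence
	stopHeight = state.LastBlockHeight - params.MaxAgeNumBlocks
	stopTime = state.LastBlockTime.Add(-params.MaxAgeDuration)
	// never reach back past the chain's first height
	if stopHeight < state.InitialHeight {
		stopHeight = state.InitialHeight
		// stopTime then no longer constrains termination
		stopTime = state.LastBlockTime
	}
	return stopHeight, stopTime
}
```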
-func (r *Reactor) Backfill(state sm.State) error { +func (r *Reactor) Backfill(ctx context.Context, state sm.State) error { params := state.ConsensusParams.Evidence stopHeight := state.LastBlockHeight - params.MaxAgeNumBlocks stopTime := state.LastBlockTime.Add(-params.MaxAgeDuration) @@ -290,7 +342,7 @@ func (r *Reactor) Backfill(state sm.State) error { stopTime = state.LastBlockTime } return r.backfill( - context.Background(), + ctx, state.ChainID, state.LastBlockHeight, stopHeight, @@ -308,12 +360,15 @@ func (r *Reactor) backfill( stopTime time.Time, ) error { r.Logger.Info("starting backfill process...", "startHeight", startHeight, - "stopHeight", stopHeight, "trustedBlockID", trustedBlockID) + "stopHeight", stopHeight, "stopTime", stopTime, "trustedBlockID", trustedBlockID) + + r.backfillBlockTotal = startHeight - stopHeight + 1 + r.metrics.BackFillBlocksTotal.Set(float64(r.backfillBlockTotal)) const sleepTime = 1 * time.Second var ( lastValidatorSet *types.ValidatorSet - lastChangeHeight int64 = startHeight + lastChangeHeight = startHeight ) queue := newBlockQueue(startHeight, stopHeight, initialHeight, stopTime, maxLightBlockRequestRetries) @@ -330,8 +385,18 @@ func (r *Reactor) backfill( for { select { case height := <-queue.nextHeight(): - r.Logger.Debug("fetching next block", "height", height) - lb, peer, err := r.dispatcher.LightBlock(ctxWithCancel, height) + // pop the next peer of the list to send a request to + peer := r.peers.Pop(ctx) + r.Logger.Debug("fetching next block", "height", height, "peer", peer) + subCtx, cancel := context.WithTimeout(ctxWithCancel, lightBlockResponseTimeout) + defer cancel() + lb, err := func() (*types.LightBlock, error) { + defer cancel() + // request the light block with a timeout + return r.dispatcher.LightBlock(subCtx, height, peer) + }() + // once the peer has returned a value, add it back to the peer list to be used again + r.peers.Append(peer) if errors.Is(err, context.Canceled) { return } @@ -353,7 +418,7 @@ func (r *Reactor) backfill( queue.retry(height) // As we are fetching blocks backwards, if this node doesn't have the block it likely doesn't // have any prior ones, thus we remove it from the peer list. - r.dispatcher.removePeer(peer) + r.peers.Remove(peer) continue } @@ -429,11 +494,21 @@ func (r *Reactor) backfill( } trustedBlockID = resp.block.LastBlockID - queue.success(resp.block.Height) + queue.success() r.Logger.Info("backfill: verified and stored light block", "height", resp.block.Height) lastValidatorSet = resp.block.ValidatorSet + r.backfilledBlocks++ + r.metrics.BackFilledBlocks.Add(1) + + // The block height might be less than the stopHeight because of the stopTime condition + // hasn't been fulfilled. + if resp.block.Height < stopHeight { + r.backfillBlockTotal++ + r.metrics.BackFillBlocksTotal.Set(float64(r.backfillBlockTotal)) + } + case <-queue.done(): if err := queue.error(); err != nil { return err @@ -450,12 +525,6 @@ func (r *Reactor) backfill( } } -// Dispatcher exposes the dispatcher so that a state provider can use it for -// light client verification -func (r *Reactor) Dispatcher() *dispatcher { //nolint:golint - return r.dispatcher -} - // handleSnapshotMessage handles envelopes sent from peers on the // SnapshotChannel. It returns an error only if the Envelope.Message is unknown // for this channel. This should never be called outside of handleMessage. 
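The backfill loop above threads every request through the shared peerList: Pop blocks until a peer is free, the peer is appended back once it answers, and a peer that cannot serve a height is dropped entirely, since blocks are fetched backwards and such a peer is unlikely to have anything earlier. A distilled sketch of that choreography:

```go
package statesync

import (
	"context"

	"github.com/tendermint/tendermint/types"
)

// fetchWithRotation distills the peer handling of the backfill loop.
func fetchWithRotation(ctx context.Context, peers *peerList, d *Dispatcher, height int64) (*types.LightBlock, error) {
	peer := peers.Pop(ctx) // blocks until some peer is free

	subCtx, cancel := context.WithTimeout(ctx, lightBlockResponseTimeout)
	defer cancel()
	lb, err := d.LightBlock(subCtx, height, peer)

	// hand the peer back to the rotation once it has answered (or timed out)
	peers.Append(peer)

	if err == nil && lb == nil {
		// blocks are fetched backwards: a peer without this height likely
		// has nothing earlier either, so drop it from the rotation
		peers.Remove(peer)
	}
	return lb, err
}
```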
@@ -498,7 +567,7 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { return nil } - logger.Debug("received snapshot", "height", msg.Height, "format", msg.Format) + logger.Info("received snapshot", "height", msg.Height, "format", msg.Format) _, err := r.syncer.AddSnapshot(envelope.From, &snapshot{ Height: msg.Height, Format: msg.Format, @@ -516,6 +585,7 @@ func (r *Reactor) handleSnapshotMessage(envelope p2p.Envelope) error { ) return nil } + logger.Info("added snapshot", "height", msg.Height, "format", msg.Format) default: return fmt.Errorf("received unknown message: %T", msg) @@ -623,6 +693,15 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { r.Logger.Error("failed to retrieve light block", "err", err, "height", msg.Height) return err } + if lb == nil { + r.blockCh.Out <- p2p.Envelope{ + To: envelope.From, + Message: &ssproto.LightBlockResponse{ + LightBlock: nil, + }, + } + return nil + } lbproto, err := lb.ToProto() if err != nil { @@ -640,8 +719,56 @@ func (r *Reactor) handleLightBlockMessage(envelope p2p.Envelope) error { } case *ssproto.LightBlockResponse: - if err := r.dispatcher.respond(msg.LightBlock, envelope.From); err != nil { - r.Logger.Error("error processing light block response", "err", err) + var height int64 + if msg.LightBlock != nil { + height = msg.LightBlock.SignedHeader.Header.Height + } + r.Logger.Info("received light block response", "peer", envelope.From, "height", height) + if err := r.dispatcher.Respond(msg.LightBlock, envelope.From); err != nil { + r.Logger.Error("error processing light block response", "err", err, "height", height) + } + + default: + return fmt.Errorf("received unknown message: %T", msg) + } + + return nil +} + +func (r *Reactor) handleParamsMessage(envelope p2p.Envelope) error { + switch msg := envelope.Message.(type) { + case *ssproto.ParamsRequest: + r.Logger.Debug("received consensus params request", "height", msg.Height) + cp, err := r.stateStore.LoadConsensusParams(int64(msg.Height)) + if err != nil { + r.Logger.Error("failed to fetch requested consensus params", "err", err, "height", msg.Height) + return nil + } + + cpproto := cp.ToProto() + r.paramsCh.Out <- p2p.Envelope{ + To: envelope.From, + Message: &ssproto.ParamsResponse{ + Height: msg.Height, + ConsensusParams: cpproto, + }, + } + + case *ssproto.ParamsResponse: + r.mtx.RLock() + defer r.mtx.RUnlock() + r.Logger.Debug("received consensus params response", "height", msg.Height) + + cp := types.ConsensusParamsFromProto(msg.ConsensusParams) + + if sp, ok := r.stateProvider.(*stateProviderP2P); ok { + select { + case sp.paramsRecvCh <- cp: + case <-time.After(time.Second): + return errors.New("failed to send consensus params, stateprovider not ready for response") + } + } else { + r.Logger.Debug("received unexpected params response; using RPC state provider", "peer", envelope.From) } default: @@ -678,6 +805,9 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err case LightBlockChannel: err = r.handleLightBlockMessage(envelope) + case ParamsChannel: + err = r.handleParamsMessage(envelope) + default: err = fmt.Errorf("unknown channel ID (%d) for envelope (%v)", chID, envelope) } @@ -703,6 +833,10 @@ func (r *Reactor) processBlockCh() { r.processCh(r.blockCh, "light block") } +func (r *Reactor) processParamsCh() { + r.processCh(r.paramsCh, "consensus params") +} + // processCh routes state sync messages to their respective handlers. 
Any error // encountered during message execution will result in a PeerError being sent on // the respective channel. When the reactor is stopped, we will catch the signal @@ -732,24 +866,41 @@ func (r *Reactor) processCh(ch *p2p.Channel, chName string) { // processPeerUpdate processes a PeerUpdate, returning an error upon failing to // handle the PeerUpdate or if a panic is recovered. func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) { - r.Logger.Debug("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) - - r.mtx.RLock() - defer r.mtx.RUnlock() + r.Logger.Info("received peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) switch peerUpdate.Status { case p2p.PeerStatusUp: - if r.syncer != nil { - r.syncer.AddPeer(peerUpdate.NodeID) + r.peers.Append(peerUpdate.NodeID) + case p2p.PeerStatusDown: + r.peers.Remove(peerUpdate.NodeID) + } + + r.mtx.Lock() + defer r.mtx.Unlock() + if r.syncer == nil { + return + } + + switch peerUpdate.Status { + case p2p.PeerStatusUp: + newProvider := NewBlockProvider(peerUpdate.NodeID, r.chainID, r.dispatcher) + r.providers[peerUpdate.NodeID] = newProvider + err := r.syncer.AddPeer(peerUpdate.NodeID) + if err != nil { + r.Logger.Error("error adding peer to syncer", "error", err) + return + } + if sp, ok := r.stateProvider.(*stateProviderP2P); ok { + // we do this in a separate routine to not block whilst waiting for the light client to finish + // whatever call it's currently executing + go sp.addProvider(newProvider) } - r.dispatcher.addPeer(peerUpdate.NodeID) case p2p.PeerStatusDown: - if r.syncer != nil { - r.syncer.RemovePeer(peerUpdate.NodeID) - } - r.dispatcher.removePeer(peerUpdate.NodeID) + delete(r.providers, peerUpdate.NodeID) + r.syncer.RemovePeer(peerUpdate.NodeID) } + r.Logger.Info("processed peer update", "peer", peerUpdate.NodeID, "status", peerUpdate.Status) } // processPeerUpdates initiates a blocking process where we listen for and handle @@ -839,5 +990,133 @@ func (r *Reactor) fetchLightBlock(height uint64) (*types.LightBlock, error) { }, ValidatorSet: vals, }, nil - +} + +func (r *Reactor) waitForEnoughPeers(ctx context.Context, numPeers int) error { + startAt := time.Now() + t := time.NewTicker(100 * time.Millisecond) + defer t.Stop() + logT := time.NewTicker(time.Minute) + defer logT.Stop() + var iter int + for r.peers.Len() < numPeers { + iter++ + select { + case <-ctx.Done(): + return fmt.Errorf("operation canceled while waiting for peers after %.2fs [%d/%d]", + time.Since(startAt).Seconds(), r.peers.Len(), numPeers) + case <-r.closeCh: + return fmt.Errorf("shutdown while waiting for peers after %.2fs [%d/%d]", + time.Since(startAt).Seconds(), r.peers.Len(), numPeers) + case <-t.C: + continue + case <-logT.C: + r.Logger.Info("waiting for sufficient peers to start statesync", + "duration", time.Since(startAt).String(), + "target", numPeers, + "peers", r.peers.Len(), + "iters", iter, + ) + continue + } + } + return nil +} + +func (r *Reactor) initStateProvider(ctx context.Context, chainID string, initialHeight int64) error { + var err error + to := light.TrustOptions{ + Period: r.cfg.TrustPeriod, + Height: r.cfg.TrustHeight, + Hash: r.cfg.TrustHashBytes(), + } + spLogger := r.Logger.With("module", "stateprovider") + spLogger.Info("initializing state provider", "trustPeriod", to.Period, + "trustHeight", to.Height, "useP2P", r.cfg.UseP2P) + + if r.cfg.UseP2P { + if err := r.waitForEnoughPeers(ctx, 2); err != nil { + return err + } + + peers := r.peers.All() + providers := 
make([]provider.Provider, len(peers)) + for idx, p := range peers { + providers[idx] = NewBlockProvider(p, chainID, r.dispatcher) + } + + r.stateProvider, err = NewP2PStateProvider(ctx, chainID, initialHeight, providers, to, r.paramsCh.Out, spLogger) + if err != nil { + return fmt.Errorf("failed to initialize P2P state provider: %w", err) + } + } else { + r.stateProvider, err = NewRPCStateProvider(ctx, chainID, initialHeight, r.cfg.RPCServers, to, spLogger) + if err != nil { + return fmt.Errorf("failed to initialize RPC state provider: %w", err) + } + } + return nil +} + +func (r *Reactor) TotalSnapshots() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil && r.syncer.snapshots != nil { + return int64(len(r.syncer.snapshots.snapshots)) + } + return 0 +} + +func (r *Reactor) ChunkProcessAvgTime() time.Duration { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil { + return time.Duration(r.syncer.avgChunkTime) + } + return time.Duration(0) +} + +func (r *Reactor) SnapshotHeight() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil { + return r.syncer.lastSyncedSnapshotHeight + } + return 0 +} +func (r *Reactor) SnapshotChunksCount() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil && r.syncer.chunks != nil { + return int64(r.syncer.chunks.numChunksReturned()) + } + return 0 +} + +func (r *Reactor) SnapshotChunksTotal() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + if r.syncer != nil && r.syncer.processingSnapshot != nil { + return int64(r.syncer.processingSnapshot.Chunks) + } + return 0 +} + +func (r *Reactor) BackFilledBlocks() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return r.backfilledBlocks +} + +func (r *Reactor) BackFillBlocksTotal() int64 { + r.mtx.RLock() + defer r.mtx.RUnlock() + + return r.backfillBlockTotal } diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 9bff72679..b90e5fd78 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -3,6 +3,7 @@ package statesync import ( "context" "fmt" + "strings" "sync" "testing" "time" @@ -15,18 +16,23 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" + proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" + smmocks "github.com/tendermint/tendermint/internal/state/mocks" "github.com/tendermint/tendermint/internal/statesync/mocks" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light/provider" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - proxymocks "github.com/tendermint/tendermint/proxy/mocks" - smmocks "github.com/tendermint/tendermint/state/mocks" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) +var ( + m = PrometheusMetrics(config.TestConfig().Instrumentation.Namespace) +) + type reactorTestSuite struct { reactor *Reactor syncer *syncer @@ -50,6 +56,11 @@ type reactorTestSuite struct { blockOutCh chan p2p.Envelope blockPeerErrCh chan p2p.PeerError + paramsChannel *p2p.Channel + paramsInCh chan p2p.Envelope + paramsOutCh chan p2p.Envelope + paramsPeerErrCh chan p2p.PeerError + peerUpdateCh chan p2p.PeerUpdate peerUpdates *p2p.PeerUpdates @@ -86,6 
+97,9 @@ func setup( blockInCh: make(chan p2p.Envelope, chBuf), blockOutCh: make(chan p2p.Envelope, chBuf), blockPeerErrCh: make(chan p2p.PeerError, chBuf), + paramsInCh: make(chan p2p.Envelope, chBuf), + paramsOutCh: make(chan p2p.Envelope, chBuf), + paramsPeerErrCh: make(chan p2p.PeerError, chBuf), conn: conn, connQuery: connQuery, stateProvider: stateProvider, @@ -118,12 +132,22 @@ func setup( rts.blockPeerErrCh, ) + rts.paramsChannel = p2p.NewChannel( + ParamsChannel, + new(ssproto.Message), + rts.paramsInCh, + rts.paramsOutCh, + rts.paramsPeerErrCh, + ) + rts.stateStore = &smmocks.Store{} rts.blockStore = store.NewBlockStore(dbm.NewMemDB()) cfg := config.DefaultStateSyncConfig() rts.reactor = NewReactor( + factory.DefaultTestChainID, + 1, *cfg, log.TestingLogger(), conn, @@ -131,15 +155,14 @@ func setup( rts.snapshotChannel, rts.chunkChannel, rts.blockChannel, + rts.paramsChannel, rts.peerUpdates, rts.stateStore, rts.blockStore, "", + m, ) - // override the dispatcher with one with a shorter timeout - rts.reactor.dispatcher = newDispatcher(rts.blockChannel.Out, 1*time.Second) - rts.syncer = newSyncer( *cfg, log.NewNopLogger(), @@ -148,7 +171,9 @@ func setup( stateProvider, rts.snapshotOutCh, rts.chunkOutCh, + rts.snapshotChannel.Done(), "", + rts.reactor.metrics, ) require.NoError(t, rts.reactor.Start()) @@ -162,6 +187,58 @@ func setup( return rts } +func TestReactor_Sync(t *testing.T) { + const snapshotHeight = 7 + rts := setup(t, nil, nil, nil, 2) + chain := buildLightBlockChain(t, 1, 10, time.Now()) + // app accepts any snapshot + rts.conn.On("OfferSnapshotSync", ctx, mock.AnythingOfType("types.RequestOfferSnapshot")). + Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ACCEPT}, nil) + + // app accepts every chunk + rts.conn.On("ApplySnapshotChunkSync", ctx, mock.AnythingOfType("types.RequestApplySnapshotChunk")). 
+ Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) + + // app query returns valid state app hash + rts.connQuery.On("InfoSync", ctx, proxy.RequestInfo).Return(&abci.ResponseInfo{ + AppVersion: 9, + LastBlockHeight: snapshotHeight, + LastBlockAppHash: chain[snapshotHeight+1].AppHash, + }, nil) + + // store accepts state and validator sets + rts.stateStore.On("Bootstrap", mock.AnythingOfType("state.State")).Return(nil) + rts.stateStore.On("SaveValidatorSets", mock.AnythingOfType("int64"), mock.AnythingOfType("int64"), + mock.AnythingOfType("*types.ValidatorSet")).Return(nil) + + closeCh := make(chan struct{}) + defer close(closeCh) + go handleLightBlockRequests(t, chain, rts.blockOutCh, + rts.blockInCh, closeCh, 0) + go graduallyAddPeers(rts.peerUpdateCh, closeCh, 1*time.Second) + go handleSnapshotRequests(t, rts.snapshotOutCh, rts.snapshotInCh, closeCh, []snapshot{ + { + Height: uint64(snapshotHeight), + Format: 1, + Chunks: 1, + }, + }) + + go handleChunkRequests(t, rts.chunkOutCh, rts.chunkInCh, closeCh, []byte("abc")) + + go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + + // update the config to use the p2p provider + rts.reactor.cfg.UseP2P = true + rts.reactor.cfg.TrustHeight = 1 + rts.reactor.cfg.TrustHash = fmt.Sprintf("%X", chain[1].Hash()) + rts.reactor.cfg.DiscoveryTime = 1 * time.Second + + // Run state sync + _, err := rts.reactor.Sync(context.Background()) + require.NoError(t, err) +} + func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { rts := setup(t, nil, nil, nil, 2) @@ -370,7 +447,7 @@ func TestReactor_LightBlockResponse(t *testing.T) { } } -func TestReactor_Dispatcher(t *testing.T) { +func TestReactor_BlockProviders(t *testing.T) { rts := setup(t, nil, nil, nil, 2) rts.peerUpdateCh <- p2p.PeerUpdate{ NodeID: types.NodeID("aa"), Status: p2p.PeerStatusUp, @@ -387,9 +464,13 @@ func TestReactor_Dispatcher(t *testing.T) { chain := buildLightBlockChain(t, 1, 10, time.Now()) go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) - dispatcher := rts.reactor.Dispatcher() - providers := dispatcher.Providers(factory.DefaultTestChainID, 5*time.Second) - require.Len(t, providers, 2) + peers := rts.reactor.peers.All() + require.Len(t, peers, 2) + + providers := make([]provider.Provider, len(peers)) + for idx, peer := range peers { + providers[idx] = NewBlockProvider(peer, factory.DefaultTestChainID, rts.reactor.dispatcher) + } wg := sync.WaitGroup{} @@ -416,6 +497,75 @@ func TestReactor_Dispatcher(t *testing.T) { t.Fail() case <-ctx.Done(): } + +} + +func TestReactor_StateProviderP2P(t *testing.T) { + rts := setup(t, nil, nil, nil, 2) + // make the syncer non-nil, else the test won't think we are state syncing + rts.reactor.syncer = rts.syncer + peerA := types.NodeID(strings.Repeat("a", 2*types.NodeIDByteLength)) + peerB := types.NodeID(strings.Repeat("b", 2*types.NodeIDByteLength)) + rts.peerUpdateCh <- p2p.PeerUpdate{ + NodeID: peerA, + Status: p2p.PeerStatusUp, + } + rts.peerUpdateCh <- p2p.PeerUpdate{ + NodeID: peerB, + Status: p2p.PeerStatusUp, + } + + closeCh := make(chan struct{}) + defer close(closeCh) + + chain := buildLightBlockChain(t, 1, 10, time.Now()) + go handleLightBlockRequests(t, chain, rts.blockOutCh, rts.blockInCh, closeCh, 0) + go handleConsensusParamsRequest(t, rts.paramsOutCh, rts.paramsInCh, closeCh) + + rts.reactor.cfg.UseP2P = true + rts.reactor.cfg.TrustHeight = 1 + rts.reactor.cfg.TrustHash = fmt.Sprintf("%X", chain[1].Hash()) + + for _, p := range []types.NodeID{peerA, peerB} { + if
!rts.reactor.peers.Contains(p) { + rts.reactor.peers.Append(p) + } + } + require.True(t, rts.reactor.peers.Len() >= 2, "peer network not configured") + + bctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + ictx, cancel := context.WithTimeout(bctx, time.Second) + defer cancel() + + rts.reactor.mtx.Lock() + err := rts.reactor.initStateProvider(ictx, factory.DefaultTestChainID, 1) + rts.reactor.mtx.Unlock() + require.NoError(t, err) + rts.reactor.syncer.stateProvider = rts.reactor.stateProvider + + actx, cancel := context.WithTimeout(bctx, 10*time.Second) + defer cancel() + + appHash, err := rts.reactor.stateProvider.AppHash(actx, 5) + require.NoError(t, err) + require.Len(t, appHash, 32) + + state, err := rts.reactor.stateProvider.State(actx, 5) + require.NoError(t, err) + require.Equal(t, appHash, state.AppHash) + require.Equal(t, types.DefaultConsensusParams(), &state.ConsensusParams) + + commit, err := rts.reactor.stateProvider.Commit(actx, 5) + require.NoError(t, err) + require.Equal(t, commit.BlockID, state.LastBlockID) + + added, err := rts.reactor.syncer.AddSnapshot(peerA, &snapshot{ + Height: 1, Format: 2, Chunks: 7, Hash: []byte{1, 2}, Metadata: []byte{1}, + }) + require.NoError(t, err) + require.True(t, added) } func TestReactor_Backfill(t *testing.T) { @@ -469,6 +619,9 @@ func TestReactor_Backfill(t *testing.T) { ) if failureRate > 3 { require.Error(t, err) + + require.NotEqual(t, rts.reactor.backfilledBlocks, rts.reactor.backfillBlockTotal) + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfillBlockTotal) } else { require.NoError(t, err) @@ -479,7 +632,12 @@ func TestReactor_Backfill(t *testing.T) { require.Nil(t, rts.blockStore.LoadBlockMeta(stopHeight-1)) require.Nil(t, rts.blockStore.LoadBlockMeta(startHeight+1)) + + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfilledBlocks) + require.Equal(t, startHeight-stopHeight+1, rts.reactor.backfillBlockTotal) } + require.Equal(t, rts.reactor.backfilledBlocks, rts.reactor.BackFilledBlocks()) + require.Equal(t, rts.reactor.backfillBlockTotal, rts.reactor.BackFillBlocksTotal()) }) } } @@ -494,7 +652,6 @@ func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { if fn() { return } - require.NoError(t, ctx.Err()) } } @@ -523,7 +680,9 @@ func handleLightBlockRequests(t *testing.T, } else { switch errorCount % 3 { case 0: // send a different block - differntLB, err := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID()).ToProto() + vals, pv := factory.RandValidatorSet(3, 10) + _, _, lb := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID(), vals, pv) + differntLB, err := lb.ToProto() require.NoError(t, err) sending <- p2p.Envelope{ From: envelope.To, @@ -550,37 +709,147 @@ func handleLightBlockRequests(t *testing.T, } } +func handleConsensusParamsRequest(t *testing.T, receiving, sending chan p2p.Envelope, closeCh chan struct{}) { + t.Helper() + params := types.DefaultConsensusParams() + paramsProto := params.ToProto() + for { + select { + case envelope := <-receiving: + t.Log("received consensus params request") + msg, ok := envelope.Message.(*ssproto.ParamsRequest) + require.True(t, ok) + sending <- p2p.Envelope{ + From: envelope.To, + Message: &ssproto.ParamsResponse{ + Height: msg.Height, + ConsensusParams: paramsProto, + }, + } + + case <-closeCh: + return + } + } +} + func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime time.Time) map[int64]*types.LightBlock { chain := make(map[int64]*types.LightBlock, 
toHeight-fromHeight) lastBlockID := factory.MakeBlockID() - blockTime := startTime.Add(-5 * time.Minute) + blockTime := startTime.Add(time.Duration(fromHeight-toHeight) * time.Minute) + vals, pv := factory.RandValidatorSet(3, 10) for height := fromHeight; height < toHeight; height++ { - chain[height] = mockLB(t, height, blockTime, lastBlockID) + vals, pv, chain[height] = mockLB(t, height, blockTime, lastBlockID, vals, pv) lastBlockID = factory.MakeBlockIDWithHash(chain[height].Header.Hash()) blockTime = blockTime.Add(1 * time.Minute) } return chain } -func mockLB(t *testing.T, height int64, time time.Time, - lastBlockID types.BlockID) *types.LightBlock { +func mockLB(t *testing.T, height int64, time time.Time, lastBlockID types.BlockID, + currentVals *types.ValidatorSet, currentPrivVals []types.PrivValidator, +) (*types.ValidatorSet, []types.PrivValidator, *types.LightBlock) { header, err := factory.MakeHeader(&types.Header{ Height: height, LastBlockID: lastBlockID, Time: time, }) require.NoError(t, err) - vals, pv := factory.RandValidatorSet(3, 10) - header.ValidatorsHash = vals.Hash() + nextVals, nextPrivVals := factory.RandValidatorSet(3, 10) + header.ValidatorsHash = currentVals.Hash() + header.NextValidatorsHash = nextVals.Hash() + header.ConsensusHash = types.DefaultConsensusParams().HashConsensusParams() lastBlockID = factory.MakeBlockIDWithHash(header.Hash()) - voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, vals) - commit, err := factory.MakeCommit(lastBlockID, height, 0, voteSet, pv, time) + voteSet := types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, currentVals) + commit, err := factory.MakeCommit(lastBlockID, height, 0, voteSet, currentPrivVals, time) require.NoError(t, err) - return &types.LightBlock{ + return nextVals, nextPrivVals, &types.LightBlock{ SignedHeader: &types.SignedHeader{ Header: header, Commit: commit, }, - ValidatorSet: vals, + ValidatorSet: currentVals, + } +} + +// graduallyAddPeers delivers a new randomly-generated peer update on peerUpdateCh once +// per interval, until closeCh is closed. Each peer update is assigned a random node ID. 
+func graduallyAddPeers( + peerUpdateCh chan p2p.PeerUpdate, + closeCh chan struct{}, + interval time.Duration, +) { + ticker := time.NewTicker(interval) + for { + select { + case <-ticker.C: + peerUpdateCh <- p2p.PeerUpdate{ + NodeID: factory.RandomNodeID(), + Status: p2p.PeerStatusUp, + } + case <-closeCh: + return + } + } +} + +func handleSnapshotRequests( + t *testing.T, + receivingCh chan p2p.Envelope, + sendingCh chan p2p.Envelope, + closeCh chan struct{}, + snapshots []snapshot, +) { + t.Helper() + for { + select { + case envelope := <-receivingCh: + _, ok := envelope.Message.(*ssproto.SnapshotsRequest) + require.True(t, ok) + for _, snapshot := range snapshots { + sendingCh <- p2p.Envelope{ + From: envelope.To, + Message: &ssproto.SnapshotsResponse{ + Height: snapshot.Height, + Format: snapshot.Format, + Chunks: snapshot.Chunks, + Hash: snapshot.Hash, + Metadata: snapshot.Metadata, + }, + } + } + case <-closeCh: + return + } + } +} + +func handleChunkRequests( + t *testing.T, + receivingCh chan p2p.Envelope, + sendingCh chan p2p.Envelope, + closeCh chan struct{}, + chunk []byte, +) { + t.Helper() + for { + select { + case envelope := <-receivingCh: + msg, ok := envelope.Message.(*ssproto.ChunkRequest) + require.True(t, ok) + sendingCh <- p2p.Envelope{ + From: envelope.To, + Message: &ssproto.ChunkResponse{ + Height: msg.Height, + Format: msg.Format, + Index: msg.Index, + Chunk: chunk, + Missing: false, + }, + } + + case <-closeCh: + return + } } } diff --git a/internal/statesync/snapshots.go b/internal/statesync/snapshots.go index 9058304a9..a0620e450 100644 --- a/internal/statesync/snapshots.go +++ b/internal/statesync/snapshots.go @@ -1,13 +1,11 @@ package statesync import ( - "context" "crypto/sha256" "fmt" "math/rand" "sort" "strings" - "time" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/types" @@ -43,8 +41,6 @@ func (s *snapshot) Key() snapshotKey { // snapshotPool discovers and aggregates snapshots across peers. type snapshotPool struct { - stateProvider StateProvider - tmsync.Mutex snapshots map[snapshotKey]*snapshot snapshotPeers map[snapshotKey]map[types.NodeID]types.NodeID @@ -60,10 +56,9 @@ type snapshotPool struct { snapshotBlacklist map[snapshotKey]bool } -// newSnapshotPool creates a new snapshot pool. The state source is used for -func newSnapshotPool(stateProvider StateProvider) *snapshotPool { +// newSnapshotPool creates a new empty snapshot pool. +func newSnapshotPool() *snapshotPool { return &snapshotPool{ - stateProvider: stateProvider, snapshots: make(map[snapshotKey]*snapshot), snapshotPeers: make(map[snapshotKey]map[types.NodeID]types.NodeID), formatIndex: make(map[uint32]map[snapshotKey]bool), @@ -80,14 +75,6 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool { // snapshot height is verified using the light client, and the expected app hash // is set for the snapshot. 
func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error) { - ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) - defer cancel() - - appHash, err := p.stateProvider.AppHash(ctx, snapshot.Height) - if err != nil { - return false, fmt.Errorf("failed to get app hash: %w", err) - } - snapshot.trustedAppHash = appHash key := snapshot.Key() p.Lock() diff --git a/internal/statesync/snapshots_test.go b/internal/statesync/snapshots_test.go index 6f27269f7..08cb08269 100644 --- a/internal/statesync/snapshots_test.go +++ b/internal/statesync/snapshots_test.go @@ -3,10 +3,8 @@ package statesync import ( "testing" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/internal/statesync/mocks" "github.com/tendermint/tendermint/types" ) @@ -39,13 +37,10 @@ func TestSnapshot_Key(t *testing.T) { } func TestSnapshotPool_Add(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, uint64(1)).Return([]byte("app_hash"), nil) - peerID := types.NodeID("aa") // Adding to the pool should work - pool := newSnapshotPool(stateProvider) + pool := newSnapshotPool() added, err := pool.Add(peerID, &snapshot{ Height: 1, Format: 1, @@ -66,18 +61,12 @@ func TestSnapshotPool_Add(t *testing.T) { require.NoError(t, err) require.False(t, added) - // The pool should have populated the snapshot with the trusted app hash snapshot := pool.Best() require.NotNil(t, snapshot) - require.Equal(t, []byte("app_hash"), snapshot.trustedAppHash) - - stateProvider.AssertExpectations(t) } func TestSnapshotPool_GetPeer(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - pool := newSnapshotPool(stateProvider) + pool := newSnapshotPool() s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} @@ -112,9 +101,7 @@ func TestSnapshotPool_GetPeer(t *testing.T) { } func TestSnapshotPool_GetPeers(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - pool := newSnapshotPool(stateProvider) + pool := newSnapshotPool() s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} @@ -137,9 +124,7 @@ func TestSnapshotPool_GetPeers(t *testing.T) { } func TestSnapshotPool_Ranked_Best(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - pool := newSnapshotPool(stateProvider) + pool := newSnapshotPool() // snapshots in expected order (best to worst). Highest height wins, then highest format. 
// Snapshots with different chunk hashes are considered different, and the most peers is @@ -182,9 +167,7 @@ func TestSnapshotPool_Ranked_Best(t *testing.T) { } func TestSnapshotPool_Reject(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - pool := newSnapshotPool(stateProvider) + pool := newSnapshotPool() peerID := types.NodeID("aa") @@ -212,9 +195,7 @@ func TestSnapshotPool_Reject(t *testing.T) { } func TestSnapshotPool_RejectFormat(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - pool := newSnapshotPool(stateProvider) + pool := newSnapshotPool() peerID := types.NodeID("aa") @@ -243,9 +224,7 @@ func TestSnapshotPool_RejectFormat(t *testing.T) { } func TestSnapshotPool_RejectPeer(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - pool := newSnapshotPool(stateProvider) + pool := newSnapshotPool() peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") @@ -285,9 +264,7 @@ func TestSnapshotPool_RejectPeer(t *testing.T) { } func TestSnapshotPool_RemovePeer(t *testing.T) { - stateProvider := &mocks.StateProvider{} - stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) - pool := newSnapshotPool(stateProvider) + pool := newSnapshotPool() peerAID := types.NodeID("aa") peerBID := types.NodeID("bb") diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index fd889dc51..b622824cd 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -1,7 +1,9 @@ package statesync import ( + "bytes" "context" + "errors" "fmt" "strings" "time" @@ -9,21 +11,25 @@ import ( dbm "github.com/tendermint/tm-db" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/p2p" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light" lightprovider "github.com/tendermint/tendermint/light/provider" lighthttp "github.com/tendermint/tendermint/light/provider/http" lightrpc "github.com/tendermint/tendermint/light/rpc" lightdb "github.com/tendermint/tendermint/light/store/db" + ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" ) //go:generate ../../scripts/mockery_generate.sh StateProvider // StateProvider is a provider of trusted state data for bootstrapping a node. This refers -// to the state.State object, not the state machine. +// to the state.State object, not the state machine. There are two implementations. One +// uses the P2P layer and the other uses the RPC layer. Both use light client verification. type StateProvider interface { // AppHash returns the app hash after the given height has been committed. AppHash(ctx context.Context, height uint64) ([]byte, error) @@ -33,20 +39,17 @@ type StateProvider interface { State(ctx context.Context, height uint64) (sm.State, error) } -// lightClientStateProvider is a state provider using the light client. 
-type lightClientStateProvider struct { +type stateProviderRPC struct { tmsync.Mutex // light.Client is not concurrency-safe lc *light.Client - version sm.Version initialHeight int64 providers map[lightprovider.Provider]string } -// NewLightClientStateProvider creates a new StateProvider using a light client and RPC clients. -func NewLightClientStateProvider( +// NewRPCStateProvider creates a new StateProvider using a light client and RPC clients. +func NewRPCStateProvider( ctx context.Context, chainID string, - version sm.Version, initialHeight int64, servers []string, trustOptions light.TrustOptions, @@ -75,51 +78,17 @@ func NewLightClientStateProvider( if err != nil { return nil, err } - return &lightClientStateProvider{ + return &stateProviderRPC{ lc: lc, - version: version, initialHeight: initialHeight, providers: providerRemotes, }, nil } -// NewLightClientStateProviderFromDispatcher creates a light client state -// provider but uses a p2p connected dispatched instead of RPC endpoints -func NewLightClientStateProviderFromDispatcher( - ctx context.Context, - chainID string, - version sm.Version, - initialHeight int64, - dispatcher *dispatcher, - trustOptions light.TrustOptions, - logger log.Logger, -) (StateProvider, error) { - providers := dispatcher.Providers(chainID, 30*time.Second) - if len(providers) < 2 { - return nil, fmt.Errorf("at least 2 peers are required, got %d", len(providers)) - } - - providersMap := make(map[lightprovider.Provider]string) - for _, p := range providers { - providersMap[p] = p.(*blockProvider).String() - } - - lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:], - lightdb.New(dbm.NewMemDB()), light.Logger(logger)) - if err != nil { - return nil, err - } - - return &lightClientStateProvider{ - lc: lc, - version: version, - initialHeight: initialHeight, - providers: providersMap, - }, nil -} - -// AppHash implements StateProvider. -func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ([]byte, error) { +// AppHash implements part of StateProvider. It uses the light client to verify the +// light blocks at heights h+1 and h+2 and, if verification succeeds, reports the app +// hash for the block at height h+1, which corresponds to the state at height h. +func (s *stateProviderRPC) AppHash(ctx context.Context, height uint64) ([]byte, error) { s.Lock() defer s.Unlock() @@ -128,27 +97,19 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ( if err != nil { return nil, err } - // We also try to fetch the blocks at height H and H+2, since we need these + + // We also try to fetch the block at H+2, since we need it // when building the state while restoring the snapshot. This avoids the race // condition where we try to restore a snapshot before H+2 exists. - // - // FIXME This is a hack, since we can't add new methods to the interface without - // breaking it. We should instead have a Has(ctx, height) method which checks - // that the state provider has access to the necessary data for the height. - // We piggyback on AppHash() since it's called when adding snapshots to the pool. _, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now()) if err != nil { return nil, err } - _, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) - if err != nil { - return nil, err - } return header.AppHash, nil } // Commit implements StateProvider.
-func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { +func (s *stateProviderRPC) Commit(ctx context.Context, height uint64) (*types.Commit, error) { s.Lock() defer s.Unlock() header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) @@ -159,13 +120,12 @@ func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (* } // State implements StateProvider. -func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm.State, error) { +func (s *stateProviderRPC) State(ctx context.Context, height uint64) (sm.State, error) { s.Lock() defer s.Unlock() state := sm.State{ ChainID: s.lc.ChainID(), - Version: s.version, InitialHeight: s.initialHeight, } if state.InitialHeight == 0 { @@ -193,6 +153,10 @@ func (s *lightClientStateProvider) State(ctx context.Context, height uint64) (sm return sm.State{}, err } + state.Version = sm.Version{ + Consensus: currentLightBlock.Version, + Software: version.TMVersion, + } state.LastBlockHeight = lastLightBlock.Height state.LastBlockTime = lastLightBlock.Time state.LastBlockID = lastLightBlock.Commit.BlockID @@ -229,9 +193,220 @@ func rpcClient(server string) (*rpchttp.HTTP, error) { if !strings.Contains(server, "://") { server = "http://" + server } - c, err := rpchttp.New(server) + return rpchttp.New(server) +} + +type stateProviderP2P struct { + tmsync.Mutex // light.Client is not concurrency-safe + lc *light.Client + initialHeight int64 + paramsSendCh chan<- p2p.Envelope + paramsRecvCh chan types.ConsensusParams +} + +// NewP2PStateProvider creates a light client state +// provider that uses a dispatcher connected to the P2P layer. +func NewP2PStateProvider( + ctx context.Context, + chainID string, + initialHeight int64, + providers []lightprovider.Provider, + trustOptions light.TrustOptions, + paramsSendCh chan<- p2p.Envelope, + logger log.Logger, +) (StateProvider, error) { + if len(providers) < 2 { + return nil, fmt.Errorf("at least 2 peers are required, got %d", len(providers)) + } + + lc, err := light.NewClient(ctx, chainID, trustOptions, providers[0], providers[1:], + lightdb.New(dbm.NewMemDB()), light.Logger(logger)) if err != nil { return nil, err } - return c, nil + + return &stateProviderP2P{ + lc: lc, + initialHeight: initialHeight, + paramsSendCh: paramsSendCh, + paramsRecvCh: make(chan types.ConsensusParams), + }, nil +} + +// AppHash implements StateProvider. +func (s *stateProviderP2P) AppHash(ctx context.Context, height uint64) ([]byte, error) { + s.Lock() + defer s.Unlock() + + // We have to fetch the next height, which contains the app hash for the previous height. + header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), time.Now()) + if err != nil { + return nil, err + } + + // We also try to fetch the block at H+2, since we need it + // when building the state while restoring the snapshot. This avoids the race + // condition where we try to restore a snapshot before H+2 exists. + _, err = s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now()) + if err != nil { + return nil, err + } + return header.AppHash, nil +} + +// Commit implements StateProvider. +func (s *stateProviderP2P) Commit(ctx context.Context, height uint64) (*types.Commit, error) { + s.Lock() + defer s.Unlock() + header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) + if err != nil { + return nil, err + } + return header.Commit, nil +} + +// State implements StateProvider.
+func (s *stateProviderP2P) State(ctx context.Context, height uint64) (sm.State, error) { + s.Lock() + defer s.Unlock() + + state := sm.State{ + ChainID: s.lc.ChainID(), + InitialHeight: s.initialHeight, + } + if state.InitialHeight == 0 { + state.InitialHeight = 1 + } + + // The snapshot height maps onto the state heights as follows: + // + // height: last block, i.e. the snapshotted height + // height+1: current block, i.e. the first block we'll process after the snapshot + // height+2: next block, i.e. the second block after the snapshot + // + // We need to fetch the NextValidators from height+2 because if the application changed + // the validator set at the snapshot height then this only takes effect at height+2. + lastLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) + if err != nil { + return sm.State{}, err + } + currentLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+1), time.Now()) + if err != nil { + return sm.State{}, err + } + nextLightBlock, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height+2), time.Now()) + if err != nil { + return sm.State{}, err + } + + state.Version = sm.Version{ + Consensus: currentLightBlock.Version, + Software: version.TMVersion, + } + state.LastBlockHeight = lastLightBlock.Height + state.LastBlockTime = lastLightBlock.Time + state.LastBlockID = lastLightBlock.Commit.BlockID + state.AppHash = currentLightBlock.AppHash + state.LastResultsHash = currentLightBlock.LastResultsHash + state.LastValidators = lastLightBlock.ValidatorSet + state.Validators = currentLightBlock.ValidatorSet + state.NextValidators = nextLightBlock.ValidatorSet + state.LastHeightValidatorsChanged = nextLightBlock.Height + + // We'll also need to fetch consensus params via P2P. + state.ConsensusParams, err = s.consensusParams(ctx, currentLightBlock.Height) + if err != nil { + return sm.State{}, err + } + // validate the consensus params + if !bytes.Equal(nextLightBlock.ConsensusHash, state.ConsensusParams.HashConsensusParams()) { + return sm.State{}, fmt.Errorf("consensus params hash mismatch at height %d. Expected %v, got %v", + currentLightBlock.Height, nextLightBlock.ConsensusHash, state.ConsensusParams.HashConsensusParams()) + } + // set the last height changed to the current height + state.LastHeightConsensusParamsChanged = currentLightBlock.Height + + return state, nil +} + +// addProvider dynamically adds a peer as a new witness. A limit of 6 providers is kept as a +// heuristic. Too many overburden the network and too few compromise the second layer of security. +func (s *stateProviderP2P) addProvider(p lightprovider.Provider) { + if len(s.lc.Witnesses()) < 6 { + s.lc.AddProvider(p) + } +} + +// consensusParams sends out a request for consensus params, blocking +// until one is returned. +// +// If it fails to get a valid set of consensus params from any of the +// providers it returns an error; however, it will retry indefinitely +// (with backoff) until the context is canceled.
+func (s *stateProviderP2P) consensusParams(ctx context.Context, height int64) (types.ConsensusParams, error) { + iterCount := 0 + for { + params, err := s.tryGetConsensusParamsFromWitnesses(ctx, height) + if err != nil { + return types.ConsensusParams{}, err + } + if params != nil { + return *params, nil + } + iterCount++ + + select { + case <-ctx.Done(): + return types.ConsensusParams{}, ctx.Err() + case <-time.After(time.Duration(iterCount) * consensusParamsResponseTimeout): + } + } +} + +// tryGetConsensusParamsFromWitnesses attempts to get consensus +// parameters from the light client's available witnesses. If both +// return values are nil, the call can be retried. +func (s *stateProviderP2P) tryGetConsensusParamsFromWitnesses( + ctx context.Context, + height int64, +) (*types.ConsensusParams, error) { + for _, provider := range s.lc.Witnesses() { + p, ok := provider.(*BlockProvider) + if !ok { + panic("expected p2p state provider to use p2p block providers") + } + + // extract the nodeID of the provider + peer, err := types.NewNodeID(p.String()) + if err != nil { + return nil, fmt.Errorf("invalid provider (%s) node id: %w", p.String(), err) + } + + select { + case s.paramsSendCh <- p2p.Envelope{ + To: peer, + Message: &ssproto.ParamsRequest{ + Height: uint64(height), + }, + }: + case <-ctx.Done(): + return nil, ctx.Err() + } + + select { + // if we get no response from this provider we move on to the next one + case <-time.After(consensusParamsResponseTimeout): + continue + case <-ctx.Done(): + return nil, ctx.Err() + case params, ok := <-s.paramsRecvCh: + if !ok { + return nil, errors.New("params channel closed") + } + return &params, nil + } + } + + // signal to caller to retry. + return nil, nil } diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 5dc8aeb8c..b4212961a 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -11,10 +11,11 @@ import ( "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/light" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -40,14 +41,11 @@ var ( errRejectSender = errors.New("snapshot sender was rejected") // errVerifyFailed is returned by Sync() when app hash or last height // verification fails. - errVerifyFailed = errors.New("verification failed") + errVerifyFailed = errors.New("verification with app failed") // errTimeout is returned by Sync() when we've waited too long to receive a chunk. errTimeout = errors.New("timed out waiting for chunk") // errNoSnapshots is returned by SyncAny() if no snapshots are found and discovery is disabled. errNoSnapshots = errors.New("no suitable snapshots found") - // errStateCommitTimeout is returned by Sync() when the timeout for retrieving - // tendermint state or the commit is exceeded - errStateCommitTimeout = errors.New("timed out trying to retrieve state and commit") ) // syncer runs a state sync against an ABCI app.
Use either SyncAny() to automatically attempt to @@ -65,8 +63,14 @@ type syncer struct { fetchers int32 retryTimeout time.Duration - mtx tmsync.RWMutex - chunks *chunkQueue + mtx tmsync.RWMutex + chunks *chunkQueue + metrics *Metrics + + avgChunkTime int64 + lastSyncedSnapshotHeight int64 + processingSnapshot *snapshot + closeCh <-chan struct{} } // newSyncer creates a new syncer. @@ -76,20 +80,25 @@ func newSyncer( conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, stateProvider StateProvider, - snapshotCh, chunkCh chan<- p2p.Envelope, + snapshotCh chan<- p2p.Envelope, + chunkCh chan<- p2p.Envelope, + closeCh <-chan struct{}, tempDir string, + metrics *Metrics, ) *syncer { return &syncer{ logger: logger, stateProvider: stateProvider, conn: conn, connQuery: connQuery, - snapshots: newSnapshotPool(stateProvider), + snapshots: newSnapshotPool(), snapshotCh: snapshotCh, chunkCh: chunkCh, tempDir: tempDir, fetchers: cfg.Fetchers, retryTimeout: cfg.ChunkRequestTimeout, + metrics: metrics, + closeCh: closeCh, } } @@ -123,6 +132,7 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err return false, err } if added { + s.metrics.TotalSnapshots.Add(1) s.logger.Info("Discovered new snapshot", "height", snapshot.Height, "format", snapshot.Format, "hash", snapshot.Hash) } @@ -131,12 +141,29 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err // AddPeer adds a peer to the pool. For now we just keep it simple and send a // single request to discover snapshots, later we may want to do retries and stuff. -func (s *syncer) AddPeer(peerID types.NodeID) { +func (s *syncer) AddPeer(peerID types.NodeID) (err error) { + defer func() { + // TODO: remove panic recover once AddPeer can no longer accidentally send on + // closed channel. + // This recover was added to protect against the p2p message being sent + // to the snapshot channel after the snapshot channel was closed. + if r := recover(); r != nil { + err = fmt.Errorf("panic sending peer snapshot request: %v", r) + } + }() + s.logger.Debug("Requesting snapshots from peer", "peer", peerID) + + msg := p2p.Envelope{ To: peerID, Message: &ssproto.SnapshotsRequest{}, } + + select { + case <-s.closeCh: + case s.snapshotCh <- msg: + } + return err } // RemovePeer removes a peer from the pool.
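The AddPeer change above guards the snapshot-request send in two ways: a select over the shutdown channel so the send cannot block past shutdown, and a recover that converts a send-on-closed-channel panic into an error. Here is a minimal, runnable sketch of that pattern; the envelope type and send function are illustrative names, not the syncer's actual API:

// A minimal sketch of the guarded-send pattern used by AddPeer above.
package main

import (
	"errors"
	"fmt"
)

type envelope struct {
	To  string
	Msg string
}

// send never blocks past shutdown: if closeCh is closed, the envelope is
// dropped. A send on an already-closed out channel panics, and the recover
// converts that panic into an error instead of crashing the caller.
func send(out chan<- envelope, closeCh <-chan struct{}, e envelope) (err error) {
	defer func() {
		if r := recover(); r != nil {
			err = fmt.Errorf("panic sending envelope: %v", r)
		}
	}()

	select {
	case <-closeCh:
		return errors.New("shutting down; dropped envelope")
	case out <- e:
		return nil
	}
}

func main() {
	out := make(chan envelope, 1)
	closeCh := make(chan struct{})

	// Normal operation: the buffered channel accepts the request.
	fmt.Println(send(out, closeCh, envelope{To: "peer-a", Msg: "SnapshotsRequest"}))

	// Shutdown: the buffer is full and closeCh is closed, so the send is
	// dropped rather than blocking forever.
	close(closeCh)
	fmt.Println(send(out, closeCh, envelope{To: "peer-b", Msg: "SnapshotsRequest"}))
}

The select handles the common shutdown race; the recover remains only for the window where the channel is closed between the select's evaluation and the send, which is why the TODO above marks it as temporary.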
@@ -153,7 +180,6 @@ func (s *syncer) SyncAny( discoveryTime time.Duration, requestSnapshots func(), ) (sm.State, *types.Commit, error) { - if discoveryTime != 0 && discoveryTime < minimumDiscoveryTime { discoveryTime = minimumDiscoveryTime } @@ -181,7 +207,6 @@ func (s *syncer) SyncAny( if discoveryTime == 0 { return sm.State{}, nil, errNoSnapshots } - requestSnapshots() s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) time.Sleep(discoveryTime) continue @@ -194,9 +219,14 @@ func (s *syncer) SyncAny( defer chunks.Close() // in case we forget to close it elsewhere } + s.processingSnapshot = snapshot + s.metrics.SnapshotChunkTotal.Set(float64(snapshot.Chunks)) + newState, commit, err := s.Sync(ctx, snapshot, chunks) switch { case err == nil: + s.metrics.SnapshotHeight.Set(float64(snapshot.Height)) + s.lastSyncedSnapshotHeight = int64(snapshot.Height) return newState, commit, nil case errors.Is(err, errAbort): @@ -230,10 +260,6 @@ func (s *syncer) SyncAny( s.logger.Info("Snapshot sender rejected", "peer", peer) } - case errors.Is(err, errStateCommitTimeout): - s.logger.Info("Timed out retrieving state and commit, rejecting and retrying...", "height", snapshot.Height) - s.snapshots.Reject(snapshot) - default: return sm.State{}, nil, fmt.Errorf("snapshot restoration failed: %w", err) } @@ -245,6 +271,7 @@ func (s *syncer) SyncAny( } snapshot = nil chunks = nil + s.processingSnapshot = nil } } @@ -264,8 +291,29 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu s.mtx.Unlock() }() + hctx, hcancel := context.WithTimeout(ctx, 30*time.Second) + defer hcancel() + + // Fetch the app hash corresponding to the snapshot + appHash, err := s.stateProvider.AppHash(hctx, snapshot.Height) + if err != nil { + // check if the main context was triggered + if ctx.Err() != nil { + return sm.State{}, nil, ctx.Err() + } + // catch the case where all the light client providers have been exhausted + if err == light.ErrNoWitnesses { + return sm.State{}, nil, + fmt.Errorf("failed to get app hash at height %d. No witnesses remaining", snapshot.Height) + } + s.logger.Info("failed to get and verify app hash. Dropping snapshot and trying again", + "err", err, "height", snapshot.Height) + return sm.State{}, nil, errRejectSnapshot + } + snapshot.trustedAppHash = appHash + // Offer snapshot to ABCI app. - err := s.offerSnapshot(ctx, snapshot) + err = s.offerSnapshot(ctx, snapshot) if err != nil { return sm.State{}, nil, err } @@ -273,35 +321,46 @@ func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueu // Spawn chunk fetchers. They will terminate when the chunk queue is closed or context canceled. fetchCtx, cancel := context.WithCancel(ctx) defer cancel() + fetchStartTime := time.Now() for i := int32(0); i < s.fetchers; i++ { go s.fetchChunks(fetchCtx, snapshot, chunks) } - pctx, pcancel := context.WithTimeout(ctx, 30*time.Second) + pctx, pcancel := context.WithTimeout(ctx, 1*time.Minute) defer pcancel() // Optimistically build new state, so we don't discover any light client failures at the end.
state, err := s.stateProvider.State(pctx, snapshot.Height) if err != nil { - // check if the provider context exceeded the 10 second deadline - if err == context.DeadlineExceeded && ctx.Err() == nil { - return sm.State{}, nil, errStateCommitTimeout + // check if the main context was triggered + if ctx.Err() != nil { + return sm.State{}, nil, ctx.Err() } - - return sm.State{}, nil, fmt.Errorf("failed to build new state: %w", err) + if err == light.ErrNoWitnesses { + return sm.State{}, nil, + fmt.Errorf("failed to get tendermint state at height %d. No witnesses remaining", snapshot.Height) + } + s.logger.Info("failed to get and verify tendermint state. Dropping snapshot and trying again", + "err", err, "height", snapshot.Height) + return sm.State{}, nil, errRejectSnapshot } commit, err := s.stateProvider.Commit(pctx, snapshot.Height) if err != nil { // check if the provider context exceeded the 10 second deadline - if err == context.DeadlineExceeded && ctx.Err() == nil { - return sm.State{}, nil, errStateCommitTimeout + if ctx.Err() != nil { + return sm.State{}, nil, ctx.Err() } - - return sm.State{}, nil, fmt.Errorf("failed to fetch commit: %w", err) + if err == light.ErrNoWitnesses { + return sm.State{}, nil, + fmt.Errorf("failed to get commit at height %d. No witnesses remaining", snapshot.Height) + } + s.logger.Info("failed to get and verify commit. Dropping snapshot and trying again", + "err", err, "height", snapshot.Height) + return sm.State{}, nil, errRejectSnapshot } // Restore snapshot - err = s.applyChunks(ctx, chunks) + err = s.applyChunks(ctx, chunks, fetchStartTime) if err != nil { return sm.State{}, nil, err } @@ -358,7 +417,7 @@ func (s *syncer) offerSnapshot(ctx context.Context, snapshot *snapshot) error { // applyChunks applies chunks to the app. It returns various errors depending on the app's // response, or nil once the snapshot is fully restored. -func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue) error { +func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue, start time.Time) error { for { chunk, err := chunks.Next() if err == errDone { @@ -400,6 +459,9 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue) error { switch resp.Result { case abci.ResponseApplySnapshotChunk_ACCEPT: + s.metrics.SnapshotChunk.Add(1) + s.avgChunkTime = time.Since(start).Nanoseconds() / int64(chunks.numChunksReturned()) + s.metrics.ChunkProcessAvgTime.Set(float64(s.avgChunkTime)) case abci.ResponseApplySnapshotChunk_ABORT: return errAbort case abci.ResponseApplySnapshotChunk_RETRY: @@ -432,6 +494,8 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch select { case <-ctx.Done(): return + case <-s.closeCh: + return case <-time.After(2 * time.Second): continue } @@ -458,6 +522,8 @@ func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *ch case <-ctx.Done(): return + case <-s.closeCh: + return } ticker.Stop() @@ -481,7 +547,7 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { "peer", peer, ) - s.chunkCh <- p2p.Envelope{ + msg := p2p.Envelope{ To: peer, Message: &ssproto.ChunkRequest{ Height: snapshot.Height, @@ -489,6 +555,11 @@ func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { Index: chunk, }, } + + select { + case s.chunkCh <- msg: + case <-s.closeCh: + } } // verifyApp verifies the sync, checking the app hash and last block height. 
It returns the diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index c1d6b462a..ad902a54c 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -13,11 +13,11 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/internal/proxy" + proxymocks "github.com/tendermint/tendermint/internal/proxy/mocks" + sm "github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/internal/statesync/mocks" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/proxy" - proxymocks "github.com/tendermint/tendermint/proxy/mocks" - sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -70,17 +70,21 @@ func TestSyncer_SyncAny(t *testing.T) { peerCID := types.NodeID("cc") rts := setup(t, connSnapshot, connQuery, stateProvider, 3) + rts.reactor.syncer = rts.syncer + // Adding a chunk should error when no sync is in progress _, err := rts.syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}) require.Error(t, err) // Adding a couple of peers should trigger snapshot discovery messages - rts.syncer.AddPeer(peerAID) + err = rts.syncer.AddPeer(peerAID) + require.NoError(t, err) e := <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) require.Equal(t, peerAID, e.To) - rts.syncer.AddPeer(peerBID) + err = rts.syncer.AddPeer(peerBID) + require.NoError(t, err) e = <-rts.snapshotOutCh require.Equal(t, &ssproto.SnapshotsRequest{}, e.Message) require.Equal(t, peerBID, e.To) @@ -195,6 +199,16 @@ func TestSyncer_SyncAny(t *testing.T) { require.Equal(t, expectState, newState) require.Equal(t, commit, lastCommit) + require.Equal(t, len(chunks), int(rts.syncer.processingSnapshot.Chunks)) + require.Equal(t, expectState.LastBlockHeight, rts.syncer.lastSyncedSnapshotHeight) + require.True(t, rts.syncer.avgChunkTime > 0) + + require.Equal(t, int64(rts.syncer.processingSnapshot.Chunks), rts.reactor.SnapshotChunksTotal()) + require.Equal(t, rts.syncer.lastSyncedSnapshotHeight, rts.reactor.SnapshotHeight()) + require.Equal(t, time.Duration(rts.syncer.avgChunkTime), rts.reactor.ChunkProcessAvgTime()) + require.Equal(t, int64(len(rts.syncer.snapshots.snapshots)), rts.reactor.TotalSnapshots()) + require.Equal(t, int64(0), rts.reactor.SnapshotChunksCount()) + connSnapshot.AssertExpectations(t) connQuery.AssertExpectations(t) } @@ -448,6 +462,9 @@ func TestSyncer_applyChunks_Results(t *testing.T) { body := []byte{1, 2, 3} chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") require.NoError(t, err) + + fetchStartTime := time.Now() + _, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) require.NoError(t, err) @@ -461,7 +478,7 @@ func TestSyncer_applyChunks_Results(t *testing.T) { Result: abci.ResponseApplySnapshotChunk_ACCEPT}, nil) } - err = rts.syncer.applyChunks(ctx, chunks) + err = rts.syncer.applyChunks(ctx, chunks, fetchStartTime) if tc.expectErr == unknownErr { require.Error(t, err) } else { @@ -498,6 +515,9 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "") require.NoError(t, err) + + fetchStartTime := time.Now() + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}}) require.True(t, added) require.NoError(t, err) @@ 
-526,7 +546,7 @@ func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { // check the queue contents, and finally close the queue to end the goroutine. // We don't really care about the result of applyChunks, since it has separate test. go func() { - rts.syncer.applyChunks(ctx, chunks) //nolint:errcheck // purposefully ignore error + rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error }() time.Sleep(50 * time.Millisecond) @@ -588,6 +608,8 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { chunks, err := newChunkQueue(s1, "") require.NoError(t, err) + fetchStartTime := time.Now() + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerAID}) require.True(t, added) require.NoError(t, err) @@ -625,7 +647,7 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // However, it will block on e.g. retry result, so we spawn a goroutine that will // be shut down when the chunk queue closes. go func() { - rts.syncer.applyChunks(ctx, chunks) //nolint:errcheck // purposefully ignore error + rts.syncer.applyChunks(ctx, chunks, fetchStartTime) //nolint:errcheck // purposefully ignore error }() time.Sleep(50 * time.Millisecond) diff --git a/store/store.go b/internal/store/store.go similarity index 99% rename from store/store.go rename to internal/store/store.go index 8848b76d9..c978241ff 100644 --- a/store/store.go +++ b/internal/store/store.go @@ -345,7 +345,7 @@ func (bs *BlockStore) pruneRange( var ( err error pruned uint64 - totalPruned uint64 = 0 + totalPruned uint64 ) batch := bs.db.NewBatch() @@ -392,7 +392,7 @@ func (bs *BlockStore) batchDelete( start, end []byte, preDeletionHook func(key, value []byte, batch dbm.Batch) error, ) (uint64, []byte, error) { - var pruned uint64 = 0 + var pruned uint64 iter, err := bs.db.Iterator(start, end) if err != nil { return pruned, start, err diff --git a/store/store_test.go b/internal/store/store_test.go similarity index 96% rename from store/store_test.go rename to internal/store/store_test.go index 2132d9aff..03db91fa8 100644 --- a/store/store_test.go +++ b/internal/store/store_test.go @@ -12,13 +12,13 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -46,13 +46,13 @@ func makeTestCommit(height int64, timestamp time.Time) *types.Commit { } func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFunc) { - config := cfg.ResetTestRoot("blockchain_reactor_test") + cfg := config.ResetTestRoot("blockchain_reactor_test") blockDB := dbm.NewMemDB() - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) if err != nil { panic(fmt.Errorf("error constructing state from genesis file: %w", err)) } - return state, NewBlockStore(blockDB), func() { os.RemoveAll(config.RootDir) } + return state, NewBlockStore(blockDB), func() { os.RemoveAll(cfg.RootDir) } } func 
freshBlockStore() (*BlockStore, dbm.DB) { @@ -292,9 +292,9 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } func TestLoadBaseMeta(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + cfg := config.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(cfg.RootDir) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) require.NoError(t, err) bs := NewBlockStore(dbm.NewMemDB()) @@ -348,9 +348,9 @@ func TestLoadBlockPart(t *testing.T) { } func TestPruneBlocks(t *testing.T) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - defer os.RemoveAll(config.RootDir) - state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) + cfg := config.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(cfg.RootDir) + state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile()) require.NoError(t, err) db := dbm.NewMemDB() bs := NewBlockStore(db) diff --git a/internal/test/factory/doc.go b/internal/test/factory/doc.go new file mode 100644 index 000000000..5b6b313f6 --- /dev/null +++ b/internal/test/factory/doc.go @@ -0,0 +1,6 @@ +/* +Package factory provides generation code for common structs in Tendermint. +It is used primarily for the testing of internal components such as statesync, +consensus, blocksync, etc. +*/ +package factory diff --git a/internal/test/factory/factory_test.go b/internal/test/factory/factory_test.go index 25f234508..07a3ef8b3 100644 --- a/internal/test/factory/factory_test.go +++ b/internal/test/factory/factory_test.go @@ -12,3 +12,7 @@ func TestMakeHeader(t *testing.T) { _, err := MakeHeader(&types.Header{}) assert.NoError(t, err) } + +func TestRandomNodeID(t *testing.T) { + assert.NotPanics(t, func() { RandomNodeID() }) +} diff --git a/internal/test/factory/genesis.go b/internal/test/factory/genesis.go index 31ec1674f..d3a0a8464 100644 --- a/internal/test/factory/genesis.go +++ b/internal/test/factory/genesis.go @@ -3,13 +3,13 @@ package factory import ( "sort" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/types" ) func RandGenesisDoc( - config *cfg.Config, + cfg *config.Config, numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) { @@ -29,7 +29,7 @@ func RandGenesisDoc( return &types.GenesisDoc{ GenesisTime: tmtime.Now(), InitialHeight: 1, - ChainID: config.ChainID(), + ChainID: cfg.ChainID(), Validators: validators, }, privValidators } diff --git a/internal/test/factory/p2p.go b/internal/test/factory/p2p.go new file mode 100644 index 000000000..34c139f58 --- /dev/null +++ b/internal/test/factory/p2p.go @@ -0,0 +1,27 @@ +package factory + +import ( + "encoding/hex" + "strings" + + "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/types" +) + +// NodeID returns a valid NodeID based on an input string +func NodeID(str string) types.NodeID { + id, err := types.NewNodeID(strings.Repeat(str, 2*types.NodeIDByteLength)) + if err != nil { + panic(err) + } + return id +} + +// RandomNodeID returns a randomly generated valid NodeID +func RandomNodeID() types.NodeID { + id, err := types.NewNodeID(hex.EncodeToString(rand.Bytes(types.NodeIDByteLength))) + if err != nil { + panic(err) + } + return id +} diff --git a/libs/bits/bit_array.go b/libs/bits/bit_array.go index b78fafddd..a0258521c 100644 --- a/libs/bits/bit_array.go +++
@@ -30,9 +30,21 @@ func NewBitArray(bits int) *BitArray {
 	if bits <= 0 {
 		return nil
 	}
-	return &BitArray{
-		Bits:  bits,
-		Elems: make([]uint64, numElems(bits)),
+	bA := &BitArray{}
+	bA.reset(bits)
+	return bA
+}
+
+// reset changes the size of the BitArray to `bits` and re-allocates a zeroed data buffer.
+func (bA *BitArray) reset(bits int) {
+	bA.mtx.Lock()
+	defer bA.mtx.Unlock()
+
+	bA.Bits = bits
+	if bits == 0 {
+		bA.Elems = nil
+	} else {
+		bA.Elems = make([]uint64, numElems(bits))
 	}
 }
 
@@ -399,8 +411,7 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error {
 	if b == "null" {
 		// This is required e.g. for encoding/json when decoding
 		// into a pointer with pre-allocated BitArray.
-		bA.Bits = 0
-		bA.Elems = nil
+		bA.reset(0)
 		return nil
 	}
 
@@ -410,16 +421,15 @@ func (bA *BitArray) UnmarshalJSON(bz []byte) error {
 		return fmt.Errorf("bitArray in JSON should be a string of format %q but got %s", bitArrayJSONRegexp.String(), b)
 	}
 	bits := match[1]
-
-	// Construct new BitArray and copy over.
 	numBits := len(bits)
-	bA2 := NewBitArray(numBits)
+
+	bA.reset(numBits)
 	for i := 0; i < numBits; i++ {
 		if bits[i] == 'x' {
-			bA2.SetIndex(i, true)
+			bA.SetIndex(i, true)
 		}
 	}
-	*bA = *bA2 //nolint:govet
+
 	return nil
 }
diff --git a/libs/bytes/bytes.go b/libs/bytes/bytes.go
index cfb7a8db2..dd8e39737 100644
--- a/libs/bytes/bytes.go
+++ b/libs/bytes/bytes.go
@@ -27,15 +27,22 @@ func (bz *HexBytes) Unmarshal(data []byte) error {
 	return nil
 }
 
-// MarshalJSON implements the json.Marshaler interface. The hex bytes is a
-// quoted hexadecimal encoded string.
+// MarshalJSON implements the json.Marshaler interface. The encoding is a JSON
+// quoted string of hexadecimal digits.
 func (bz HexBytes) MarshalJSON() ([]byte, error) {
-	s := strings.ToUpper(hex.EncodeToString(bz))
-	jbz := make([]byte, len(s)+2)
-	jbz[0] = '"'
-	copy(jbz[1:], s)
-	jbz[len(jbz)-1] = '"'
-	return jbz, nil
+	size := hex.EncodedLen(len(bz)) + 2 // +2 for quotation marks
+	buf := make([]byte, size)
+	hex.Encode(buf[1:], []byte(bz))
+	buf[0] = '"'
+	buf[size-1] = '"'
+
+	// Ensure letter digits are capitalized.
+	for i := 1; i < size-1; i++ {
+		if buf[i] >= 'a' && buf[i] <= 'f' {
+			buf[i] = 'A' + (buf[i] - 'a')
+		}
+	}
+	return buf, nil
 }
 
 // UnmarshalJSON implements the json.Umarshaler interface.
diff --git a/libs/bytes/bytes_test.go b/libs/bytes/bytes_test.go
index db882f1c1..6a9ca7c3d 100644
--- a/libs/bytes/bytes_test.go
+++ b/libs/bytes/bytes_test.go
@@ -37,6 +37,7 @@ func TestJSONMarshal(t *testing.T) {
 		{[]byte(``), `{"B1":"","B2":""}`},
 		{[]byte(`a`), `{"B1":"YQ==","B2":"61"}`},
 		{[]byte(`abc`), `{"B1":"YWJj","B2":"616263"}`},
+		{[]byte("\x1a\x2b\x3c"), `{"B1":"Gis8","B2":"1A2B3C"}`},
 	}
 
 	for i, tc := range cases {
diff --git a/libs/cmap/cmap.go b/libs/cmap/cmap.go
index 539870363..5aa82e807 100644
--- a/libs/cmap/cmap.go
+++ b/libs/cmap/cmap.go
@@ -22,6 +22,20 @@ func (cm *CMap) Set(key string, value interface{}) {
 	cm.l.Unlock()
 }
 
+// GetOrSet returns the existing value if present. Otherwise, it stores `newValue` and returns it.
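+//
+// Illustrative usage (editorial sketch; cm stands for any existing *CMap):
+//
+//	v, ok := cm.GetOrSet("k", 1) // stores 1: v == 1, ok == false
+//	v, ok = cm.GetOrSet("k", 2)  // keeps 1:  v == 1, ok == true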
+func (cm *CMap) GetOrSet(key string, newValue interface{}) (value interface{}, alreadyExists bool) { + + cm.l.Lock() + defer cm.l.Unlock() + + if v, ok := cm.m[key]; ok { + return v, true + } + + cm.m[key] = newValue + return newValue, false +} + func (cm *CMap) Get(key string) interface{} { cm.l.Lock() val := cm.m[key] diff --git a/libs/cmap/cmap_test.go b/libs/cmap/cmap_test.go index bab78da96..68a052bdb 100644 --- a/libs/cmap/cmap_test.go +++ b/libs/cmap/cmap_test.go @@ -3,6 +3,7 @@ package cmap import ( "fmt" "strings" + "sync" "testing" "github.com/stretchr/testify/assert" @@ -67,3 +68,46 @@ func BenchmarkCMapHas(b *testing.B) { m.Has(string(rune(i))) } } + +func TestCMap_GetOrSet_Parallel(t *testing.T) { + + tests := []struct { + name string + newValue interface{} + parallelism int + }{ + {"test1", "a", 4}, + {"test2", "a", 40}, + {"test3", "a", 1}, + } + + //nolint:scopelint + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cm := NewCMap() + + wg := sync.WaitGroup{} + wg.Add(tt.parallelism) + for i := 0; i < tt.parallelism; i++ { + go func() { + defer wg.Done() + gotValue, _ := cm.GetOrSet(tt.name, tt.newValue) + assert.EqualValues(t, tt.newValue, gotValue) + }() + } + wg.Wait() + }) + } +} + +func TestCMap_GetOrSet_Exists(t *testing.T) { + cm := NewCMap() + + gotValue, exists := cm.GetOrSet("key", 1000) + assert.False(t, exists) + assert.EqualValues(t, 1000, gotValue) + + gotValue, exists = cm.GetOrSet("key", 2000) + assert.True(t, exists) + assert.EqualValues(t, 1000, gotValue) +} diff --git a/libs/json/helpers_test.go b/libs/json/helpers_test.go index a87bc51f1..ccb3c0038 100644 --- a/libs/json/helpers_test.go +++ b/libs/json/helpers_test.go @@ -61,7 +61,6 @@ func (c CustomValue) MarshalJSON() ([]byte, error) { } func (c CustomValue) UnmarshalJSON(bz []byte) error { - c.Value = "custom" return nil } diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 54a030fe8..68d1ec941 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -231,34 +231,45 @@ func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { return err } var qs string + if args.Query != nil { qs = args.Query.String() } - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[args.Subscriber] - if args.ID != "" { - qs, ok = clientSubscriptions[args.ID] + clientSubscriptions, err := func() (map[string]string, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() - if ok && args.Query == nil { - var err error - args.Query, err = query.New(qs) - if err != nil { - return err + clientSubscriptions, ok := s.subscriptions[args.Subscriber] + if args.ID != "" { + qs, ok = clientSubscriptions[args.ID] + + if ok && args.Query == nil { + var err error + args.Query, err = query.New(qs) + if err != nil { + return nil, err + } } + } else if qs != "" { + args.ID, ok = clientSubscriptions[qs] } - } else if qs != "" { - args.ID, ok = clientSubscriptions[qs] - } - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound + if !ok { + return nil, ErrSubscriptionNotFound + } + + return clientSubscriptions, nil + }() + + if err != nil { + return err } select { case s.cmds <- cmd{op: unsub, clientID: args.Subscriber, query: args.Query, subscription: &Subscription{id: args.ID}}: s.mtx.Lock() + defer s.mtx.Unlock() delete(clientSubscriptions, args.ID) delete(clientSubscriptions, qs) @@ -266,7 +277,6 @@ func (s *Server) Unsubscribe(ctx context.Context, args UnsubscribeArgs) error { if len(clientSubscriptions) == 0 { delete(s.subscriptions, args.Subscriber) } - s.mtx.Unlock() return 
nil case <-ctx.Done(): return ctx.Err() @@ -288,8 +298,10 @@ func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { select { case s.cmds <- cmd{op: unsub, clientID: clientID}: s.mtx.Lock() + defer s.mtx.Unlock() + delete(s.subscriptions, clientID) - s.mtx.Unlock() + return nil case <-ctx.Done(): return ctx.Err() @@ -495,7 +507,10 @@ func (state *state) send(msg interface{}, events []types.Event) error { for clientID, subscription := range clientSubscriptions { if cap(subscription.out) == 0 { // block on unbuffered channel - subscription.out <- NewMessage(subscription.id, msg, events) + select { + case subscription.out <- NewMessage(subscription.id, msg, events): + case <-subscription.canceled: + } } else { // don't block on buffered channels select { diff --git a/libs/time/time.go b/libs/time/time.go index 022bdf574..786f9bbb4 100644 --- a/libs/time/time.go +++ b/libs/time/time.go @@ -1,7 +1,6 @@ package time import ( - "sort" "time" ) @@ -16,43 +15,3 @@ func Now() time.Time { func Canonical(t time.Time) time.Time { return t.Round(0).UTC() } - -// WeightedTime for computing a median. -type WeightedTime struct { - Time time.Time - Weight int64 -} - -// NewWeightedTime with time and weight. -func NewWeightedTime(time time.Time, weight int64) *WeightedTime { - return &WeightedTime{ - Time: time, - Weight: weight, - } -} - -// WeightedMedian computes weighted median time for a given array of WeightedTime and the total voting power. -func WeightedMedian(weightedTimes []*WeightedTime, totalVotingPower int64) (res time.Time) { - median := totalVotingPower / 2 - - sort.Slice(weightedTimes, func(i, j int) bool { - if weightedTimes[i] == nil { - return false - } - if weightedTimes[j] == nil { - return true - } - return weightedTimes[i].Time.UnixNano() < weightedTimes[j].Time.UnixNano() - }) - - for _, weightedTime := range weightedTimes { - if weightedTime != nil { - if median <= weightedTime.Weight { - res = weightedTime.Time - break - } - median -= weightedTime.Weight - } - } - return -} diff --git a/light/client.go b/light/client.go index 52bbdf981..cc606f496 100644 --- a/light/client.go +++ b/light/client.go @@ -52,6 +52,8 @@ const ( // 10s is sufficient for most networks. defaultMaxBlockLag = 10 * time.Second + + defaultProviderTimeout = 10 * time.Second ) // Option sets a parameter for the light client. @@ -61,9 +63,7 @@ type Option func(*Client) // check the blocks (every block, in ascending height order). Note this is // much slower than SkippingVerification, albeit more secure. func SequentialVerification() Option { - return func(c *Client) { - c.verificationMode = sequential - } + return func(c *Client) { c.verificationMode = sequential } } // SkippingVerification option configures the light client to skip blocks as @@ -87,24 +87,18 @@ func SkippingVerification(trustLevel tmmath.Fraction) Option { // the h amount of light blocks will be removed from the store. // Default: 1000. A pruning size of 0 will not prune the light client at all. func PruningSize(h uint16) Option { - return func(c *Client) { - c.pruningSize = h - } + return func(c *Client) { c.pruningSize = h } } // Logger option can be used to set a logger for the client. func Logger(l log.Logger) Option { - return func(c *Client) { - c.logger = l - } + return func(c *Client) { c.logger = l } } // MaxClockDrift defines how much new header's time can drift into // the future relative to the light clients local time. Default: 10s. 
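+// For example (illustrative, not part of this change): with
+//
+//	light.MaxClockDrift(10 * time.Second)
+//
+// a header timestamped up to 10s ahead of the local clock still verifies.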
func MaxClockDrift(d time.Duration) Option {
-	return func(c *Client) {
-		c.maxClockDrift = d
-	}
+	return func(c *Client) { c.maxClockDrift = d }
 }
 
 // MaxBlockLag represents the maximum time difference between the realtime
@@ -116,9 +110,13 @@ func MaxClockDrift(d time.Duration) Option {
 // was 12:00. Then the lag here is 5 minutes.
 // Default: 10s
 func MaxBlockLag(d time.Duration) Option {
-	return func(c *Client) {
-		c.maxBlockLag = d
-	}
+	return func(c *Client) { c.maxBlockLag = d }
+}
+
+// ProviderTimeout is the maximum time that the light client will wait for a
+// provider to respond with a light block.
+func ProviderTimeout(d time.Duration) Option {
+	return func(c *Client) { c.providerTimeout = d }
 }
 
 // Client represents a light client, connected to a single chain, which gets
@@ -133,6 +131,7 @@ type Client struct {
 	trustLevel       tmmath.Fraction
 	maxClockDrift    time.Duration
 	maxBlockLag      time.Duration
+	providerTimeout  time.Duration
 
 	// Mutex for locking during changes of the light clients providers
 	providerMutex tmsync.Mutex
@@ -197,12 +196,13 @@ func NewClient(
 		chainID:          chainID,
 		trustingPeriod:   trustOptions.Period,
 		verificationMode: skipping,
-		trustLevel:       DefaultTrustLevel,
-		maxClockDrift:    defaultMaxClockDrift,
-		maxBlockLag:      defaultMaxBlockLag,
 		primary:          primary,
 		witnesses:        witnesses,
 		trustedStore:     trustedStore,
+		trustLevel:       DefaultTrustLevel,
+		maxClockDrift:    defaultMaxClockDrift,
+		maxBlockLag:      defaultMaxBlockLag,
+		providerTimeout:  defaultProviderTimeout,
 		pruningSize:      defaultPruningSize,
 		logger:           log.NewNopLogger(),
 	}
@@ -379,6 +379,7 @@ func (c *Client) Update(ctx context.Context, now time.Time) (*types.LightBlock,
 		return nil, err
 	}
 
+	// If there is a new light block, verify it.
 	if latestBlock.Height > lastTrustedHeight {
 		err = c.verifyLightBlock(ctx, latestBlock, now)
 		if err != nil {
@@ -388,7 +389,8 @@ func (c *Client) Update(ctx context.Context, now time.Time) (*types.LightBlock,
 		return latestBlock, nil
 	}
 
-	return nil, nil
+	// Otherwise, return the latest trusted block.
+	return c.latestTrustedBlock, nil
 }
 
 // VerifyLightBlockAtHeight fetches the light block at the given height
@@ -693,7 +695,9 @@ func (c *Client) verifySkipping(
 		if depth == len(blockCache)-1 {
 			// schedule what the next height we need to fetch is
 			pivotHeight := c.schedule(verifiedBlock.Height, blockCache[depth].Height)
-			interimBlock, providerErr := source.LightBlock(ctx, pivotHeight)
+			subCtx, cancel := context.WithTimeout(ctx, c.providerTimeout)
+			defer cancel()
+			interimBlock, providerErr := c.getLightBlock(subCtx, source, pivotHeight)
 			if providerErr != nil {
 				return nil, ErrVerificationFailed{From: verifiedBlock.Height, To: pivotHeight, Reason: providerErr}
 			}
@@ -930,7 +934,7 @@ func (c *Client) backwards(
 // any other error, the primary is permanently dropped and is replaced by a witness.
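+//
+// Editorial sketch: every fetch is now funneled through getLightBlock (defined
+// below), so each request is additionally bounded by providerTimeout, roughly:
+//
+//	subCtx, cancel := context.WithTimeout(ctx, c.providerTimeout)
+//	defer cancel()
+//	l, err := p.LightBlock(subCtx, height)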
func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*types.LightBlock, error) { c.providerMutex.Lock() - l, err := c.primary.LightBlock(ctx, height) + l, err := c.getLightBlock(ctx, c.primary, height) c.providerMutex.Unlock() switch err { @@ -957,6 +961,16 @@ func (c *Client) lightBlockFromPrimary(ctx context.Context, height int64) (*type } } +func (c *Client) getLightBlock(ctx context.Context, p provider.Provider, height int64) (*types.LightBlock, error) { + subCtx, cancel := context.WithTimeout(ctx, c.providerTimeout) + defer cancel() + l, err := p.LightBlock(subCtx, height) + if err == context.DeadlineExceeded || ctx.Err() != nil { + return nil, provider.ErrNoResponse + } + return l, err +} + // NOTE: requires a providerMutex lock func (c *Client) removeWitnesses(indexes []int) error { // check that we will still have witnesses remaining @@ -989,7 +1003,7 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) c.providerMutex.Lock() defer c.providerMutex.Unlock() - if len(c.witnesses) <= 1 { + if len(c.witnesses) < 1 { return nil, ErrNoWitnesses } @@ -1001,7 +1015,7 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) ) // send out a light block request to all witnesses - subctx, cancel := context.WithCancel(ctx) + subctx, cancel := context.WithTimeout(ctx, c.providerTimeout) defer cancel() for index := range c.witnesses { wg.Add(1) diff --git a/light/client_test.go b/light/client_test.go index e8a478a53..c7c974ee5 100644 --- a/light/client_test.go +++ b/light/client_test.go @@ -11,7 +11,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/internal/test/factory" @@ -644,7 +643,7 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { chainID, trustOptions, mockDeadNode, - []provider.Provider{mockFullNode, mockFullNode}, + []provider.Provider{mockDeadNode, mockFullNode}, dbs.New(dbm.NewMemDB()), light.Logger(log.TestingLogger()), ) @@ -663,6 +662,32 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { mockFullNode.AssertExpectations(t) } +func TestClientReplacesPrimaryWithWitnessIfPrimaryDoesntHaveBlock(t *testing.T) { + mockFullNode := &provider_mocks.Provider{} + mockFullNode.On("LightBlock", mock.Anything, mock.Anything).Return(l1, nil) + + mockDeadNode := &provider_mocks.Provider{} + mockDeadNode.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) + c, err := light.NewClient( + ctx, + chainID, + trustOptions, + mockDeadNode, + []provider.Provider{mockDeadNode, mockFullNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(log.TestingLogger()), + ) + require.NoError(t, err) + _, err = c.Update(ctx, bTime.Add(2*time.Hour)) + require.NoError(t, err) + + // we should still have the dead node as a witness because it + // hasn't repeatedly been unresponsive yet + assert.Equal(t, 2, len(c.Witnesses())) + mockDeadNode.AssertExpectations(t) + mockFullNode.AssertExpectations(t) +} + func TestClient_BackwardsVerification(t *testing.T) { { headers, vals, _ := genLightBlocksWithKeys(chainID, 9, 3, 0, bTime) @@ -724,51 +749,32 @@ func TestClient_BackwardsVerification(t *testing.T) { } { - testCases := []struct { - headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet - }{ - { - // 7) provides incorrect height - headers: map[int64]*types.SignedHeader{ - 2: 
keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), - 3: h3, - }, - vals: valSet, - }, - { - // 8) provides incorrect hash - headers: map[int64]*types.SignedHeader{ - 2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)), - 3: h3, - }, - vals: valSet, - }, + // 8) provides incorrect hash + headers := map[int64]*types.SignedHeader{ + 2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, + hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)), + 3: h3, } + vals := valSet + mockNode := mockNodeFromHeadersAndVals(headers, vals) + c, err := light.NewClient( + ctx, + chainID, + light.TrustOptions{ + Period: 1 * time.Hour, + Height: 3, + Hash: h3.Hash(), + }, + mockNode, + []provider.Provider{mockNode}, + dbs.New(dbm.NewMemDB()), + light.Logger(log.TestingLogger()), + ) + require.NoError(t, err) - for idx, tc := range testCases { - mockNode := mockNodeFromHeadersAndVals(tc.headers, tc.vals) - c, err := light.NewClient( - ctx, - chainID, - light.TrustOptions{ - Period: 1 * time.Hour, - Height: 3, - Hash: h3.Hash(), - }, - mockNode, - []provider.Provider{mockNode}, - dbs.New(dbm.NewMemDB()), - light.Logger(log.TestingLogger()), - ) - require.NoError(t, err, idx) - - _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second)) - assert.Error(t, err, idx) - mockNode.AssertExpectations(t) - } + _, err = c.VerifyLightBlockAtHeight(ctx, 2, bTime.Add(1*time.Hour).Add(1*time.Second)) + assert.Error(t, err) + mockNode.AssertExpectations(t) } } diff --git a/light/detector.go b/light/detector.go index 32a0c3f1e..ddb0bc4ed 100644 --- a/light/detector.go +++ b/light/detector.go @@ -110,7 +110,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader, witness provider.Provider, witnessIndex int) { - lightBlock, err := witness.LightBlock(ctx, h.Height) + lightBlock, err := c.getLightBlock(ctx, witness, h.Height) switch err { // no error means we move on to checking the hash of the two headers case nil: @@ -331,7 +331,7 @@ func (c *Client) examineConflictingHeaderAgainstTrace( if traceBlock.Height == targetBlock.Height { sourceBlock = targetBlock } else { - sourceBlock, err = source.LightBlock(ctx, traceBlock.Height) + sourceBlock, err = c.getLightBlock(ctx, source, traceBlock.Height) if err != nil { return nil, nil, fmt.Errorf("failed to examine trace: %w", err) } @@ -379,7 +379,7 @@ func (c *Client) getTargetBlockOrLatest( height int64, witness provider.Provider, ) (bool, *types.LightBlock, error) { - lightBlock, err := witness.LightBlock(ctx, 0) + lightBlock, err := c.getLightBlock(ctx, witness, 0) if err != nil { return false, nil, err } @@ -394,7 +394,7 @@ func (c *Client) getTargetBlockOrLatest( // the witness has caught up. We recursively call the function again. 
However in order // to avoud a wild goose chase where the witness sends us one header below and one header // above the height we set a timeout to the context - lightBlock, err := witness.LightBlock(ctx, height) + lightBlock, err := c.getLightBlock(ctx, witness, height) return true, lightBlock, err } diff --git a/light/doc.go b/light/doc.go index 700bbeb6c..c30c68eb0 100644 --- a/light/doc.go +++ b/light/doc.go @@ -94,7 +94,7 @@ Check out other examples in example_test.go ## 2. Pure functions to verify a new header (see verifier.go) Verify function verifies a new header against some trusted header. See -https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/verification.md +https://github.com/tendermint/spec/blob/master/spec/light-client/verification/README.md for details. There are two methods of verification: sequential and bisection @@ -118,10 +118,7 @@ as a wrapper, which verifies all the headers, using a light client connected to some other node. See -https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html -for usage example. -Or see -https://github.com/tendermint/spec/tree/master/spec/consensus/light-client -for the full spec +https://github.com/tendermint/spec/tree/master/spec/light-client +for the light client specification. */ package light diff --git a/light/provider/http/http.go b/light/provider/http/http.go index ceea0f6d2..f8bf7d29e 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/light/provider" rpcclient "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -300,11 +300,11 @@ func (p *http) noBlock(e error) error { func (p *http) parseRPCError(e *rpctypes.RPCError) error { switch { // 1) check if the error indicates that the peer doesn't have the block - case strings.Contains(e.Data, ctypes.ErrHeightNotAvailable.Error()): + case strings.Contains(e.Data, coretypes.ErrHeightNotAvailable.Error()): return p.noBlock(provider.ErrLightBlockNotFound) // 2) check if the height requested is too high - case strings.Contains(e.Data, ctypes.ErrHeightExceedsChainHead.Error()): + case strings.Contains(e.Data, coretypes.ErrHeightExceedsChainHead.Error()): return p.noBlock(provider.ErrHeightTooHigh) // 3) check if the provider closed the connection diff --git a/light/proxy/proxy.go b/light/proxy/proxy.go index 8f1e7bf87..6f2622588 100644 --- a/light/proxy/proxy.go +++ b/light/proxy/proxy.go @@ -113,7 +113,7 @@ func (p *Proxy) listen() (net.Listener, *http.ServeMux, error) { } // 4) Start listening for new connections. 
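+	// Editorial note: rpcserver.Listen now takes the connection limit as a
+	// plain int instead of the whole server config; hypothetical values shown:
+	//
+	//	listener, err := rpcserver.Listen("tcp://127.0.0.1:8888", 100)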
- listener, err := rpcserver.Listen(p.Addr, p.Config) + listener, err := rpcserver.Listen(p.Addr, p.Config.MaxOpenConnections) if err != nil { return nil, mux, err } diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 62d70f545..436ae1b76 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -4,7 +4,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" lrpc "github.com/tendermint/tendermint/light/rpc" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" @@ -52,91 +52,91 @@ func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { } } -type rpcHealthFunc func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) +type rpcHealthFunc func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) func makeHealthFunc(c *lrpc.Client) rpcHealthFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultHealth, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultHealth, error) { return c.Health(ctx.Context()) } } -type rpcStatusFunc func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) +type rpcStatusFunc func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) // nolint: interfacer func makeStatusFunc(c *lrpc.Client) rpcStatusFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultStatus, error) { return c.Status(ctx.Context()) } } -type rpcNetInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) +type rpcNetInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) func makeNetInfoFunc(c *lrpc.Client) rpcNetInfoFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultNetInfo, error) { return c.NetInfo(ctx.Context()) } } -type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) +type rpcBlockchainInfoFunc func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) //nolint:lll func makeBlockchainInfoFunc(c *lrpc.Client) rpcBlockchainInfoFunc { - return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { + return func(ctx *rpctypes.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { return c.BlockchainInfo(ctx.Context(), minHeight, maxHeight) } } -type rpcGenesisFunc func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) +type rpcGenesisFunc func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) func makeGenesisFunc(c *lrpc.Client) rpcGenesisFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultGenesis, error) { return c.Genesis(ctx.Context()) } } -type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) +type rpcGenesisChunkedFunc func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) func makeGenesisChunkedFunc(c *lrpc.Client) rpcGenesisChunkedFunc { - return func(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { + return func(ctx *rpctypes.Context, chunk uint) (*coretypes.ResultGenesisChunk, error) { return c.GenesisChunked(ctx.Context(), chunk) } 
} -type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) +type rpcBlockFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) func makeBlockFunc(c *lrpc.Client) rpcBlockFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlock, error) { + return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlock, error) { return c.Block(ctx.Context(), height) } } -type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) +type rpcBlockByHashFunc func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) func makeBlockByHashFunc(c *lrpc.Client) rpcBlockByHashFunc { - return func(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { + return func(ctx *rpctypes.Context, hash []byte) (*coretypes.ResultBlock, error) { return c.BlockByHash(ctx.Context(), hash) } } -type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) +type rpcBlockResultsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) func makeBlockResultsFunc(c *lrpc.Client) rpcBlockResultsFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultBlockResults, error) { + return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultBlockResults, error) { return c.BlockResults(ctx.Context(), height) } } -type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) +type rpcCommitFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) func makeCommitFunc(c *lrpc.Client) rpcCommitFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultCommit, error) { + return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultCommit, error) { return c.Commit(ctx.Context(), height) } } -type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) +type rpcTxFunc func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) func makeTxFunc(c *lrpc.Client) rpcTxFunc { - return func(ctx *rpctypes.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { + return func(ctx *rpctypes.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) { return c.Tx(ctx.Context(), hash, prove) } } @@ -147,7 +147,7 @@ type rpcTxSearchFunc func( prove bool, page, perPage *int, orderBy string, -) (*ctypes.ResultTxSearch, error) +) (*coretypes.ResultTxSearch, error) func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { return func( @@ -156,7 +156,7 @@ func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { prove bool, page, perPage *int, orderBy string, - ) (*ctypes.ResultTxSearch, error) { + ) (*coretypes.ResultTxSearch, error) { return c.TxSearch(ctx.Context(), query, prove, page, perPage, orderBy) } } @@ -167,7 +167,7 @@ type rpcBlockSearchFunc func( prove bool, page, perPage *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) +) (*coretypes.ResultBlockSearch, error) func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc { return func( @@ -176,90 +176,90 @@ func makeBlockSearchFunc(c *lrpc.Client) rpcBlockSearchFunc { prove bool, page, perPage *int, orderBy string, - ) (*ctypes.ResultBlockSearch, error) { + ) (*coretypes.ResultBlockSearch, error) { return c.BlockSearch(ctx.Context(), query, page, perPage, orderBy) } } type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64, - page, perPage *int) (*ctypes.ResultValidators, error) + page, perPage *int) 
(*coretypes.ResultValidators, error) func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc { - return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { + return func(ctx *rpctypes.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { return c.Validators(ctx.Context(), height, page, perPage) } } -type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) +type rpcDumpConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) func makeDumpConsensusStateFunc(c *lrpc.Client) rpcDumpConsensusStateFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultDumpConsensusState, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultDumpConsensusState, error) { return c.DumpConsensusState(ctx.Context()) } } -type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) +type rpcConsensusStateFunc func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) func makeConsensusStateFunc(c *lrpc.Client) rpcConsensusStateFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultConsensusState, error) { return c.ConsensusState(ctx.Context()) } } -type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) +type rpcConsensusParamsFunc func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) func makeConsensusParamsFunc(c *lrpc.Client) rpcConsensusParamsFunc { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultConsensusParams, error) { + return func(ctx *rpctypes.Context, height *int64) (*coretypes.ResultConsensusParams, error) { return c.ConsensusParams(ctx.Context(), height) } } -type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) +type rpcUnconfirmedTxsFunc func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) func makeUnconfirmedTxsFunc(c *lrpc.Client) rpcUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { + return func(ctx *rpctypes.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { return c.UnconfirmedTxs(ctx.Context(), limit) } } -type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) +type rpcNumUnconfirmedTxsFunc func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultUnconfirmedTxs, error) { return c.NumUnconfirmedTxs(ctx.Context()) } } -type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) +type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { return c.BroadcastTxCommit(ctx.Context(), tx) } } -type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) +type rpcBroadcastTxSyncFunc func(ctx 
*rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.BroadcastTxSync(ctx.Context(), tx) } } -type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) +type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return func(ctx *rpctypes.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.BroadcastTxAsync(ctx.Context(), tx) } } type rpcABCIQueryFunc func(ctx *rpctypes.Context, path string, - data bytes.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) + data bytes.HexBytes, height int64, prove bool) (*coretypes.ResultABCIQuery, error) func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc { return func(ctx *rpctypes.Context, path string, data bytes.HexBytes, - height int64, prove bool) (*ctypes.ResultABCIQuery, error) { + height int64, prove bool) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx.Context(), path, data, rpcclient.ABCIQueryOptions{ Height: height, @@ -268,19 +268,19 @@ func makeABCIQueryFunc(c *lrpc.Client) rpcABCIQueryFunc { } } -type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) +type rpcABCIInfoFunc func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) func makeABCIInfoFunc(c *lrpc.Client) rpcABCIInfoFunc { - return func(ctx *rpctypes.Context) (*ctypes.ResultABCIInfo, error) { + return func(ctx *rpctypes.Context) (*coretypes.ResultABCIInfo, error) { return c.ABCIInfo(ctx.Context()) } } -type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) +type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) // nolint: interfacer func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc { - return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + return func(ctx *rpctypes.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { return c.BroadcastEvidence(ctx.Context(), ev) } } diff --git a/light/rpc/client.go b/light/rpc/client.go index 48cf7ce73..dc745542e 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -16,7 +16,7 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" service "github.com/tendermint/tendermint/libs/service" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -113,22 +113,22 @@ func (c *Client) OnStop() { } } -func (c *Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c *Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { return c.next.Status(ctx) } -func (c *Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c *Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { return c.next.ABCIInfo(ctx) } // ABCIQuery requests proof 
by default. -func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (c *Client) ABCIQuery(ctx context.Context, path string, data tmbytes.HexBytes) (*coretypes.ResultABCIQuery, error) { //nolint:lll return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } // ABCIQueryWithOptions returns an error if opts.Prove is false. func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmbytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { // always request the proof opts.Prove = true @@ -150,7 +150,7 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb return nil, errors.New("no proof ops") } if resp.Height <= 0 { - return nil, ctypes.ErrZeroOrNegativeHeight + return nil, coretypes.ErrZeroOrNegativeHeight } // Update the light client if we're behind. @@ -185,46 +185,50 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb } } - return &ctypes.ResultABCIQuery{Response: resp}, nil + return &coretypes.ResultABCIQuery{Response: resp}, nil } -func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { return c.next.BroadcastTxCommit(ctx, tx) } -func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.next.BroadcastTxAsync(ctx, tx) } -func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.next.BroadcastTxSync(ctx, tx) } -func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { return c.next.UnconfirmedTxs(ctx, limit) } -func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { return c.next.NumUnconfirmedTxs(ctx) } -func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { return c.next.CheckTx(ctx, tx) } -func (c *Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c *Client) RemoveTx(ctx context.Context, txKey types.TxKey) error { + return c.next.RemoveTx(ctx, txKey) +} + +func (c *Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { return c.next.NetInfo(ctx) } -func (c *Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c *Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { return c.next.DumpConsensusState(ctx) } -func (c *Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c *Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { return c.next.ConsensusState(ctx) } -func (c *Client) ConsensusParams(ctx 
context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { res, err := c.next.ConsensusParams(ctx, height) if err != nil { return nil, err @@ -235,7 +239,7 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re return nil, err } if res.BlockHeight <= 0 { - return nil, ctypes.ErrZeroOrNegativeHeight + return nil, coretypes.ErrZeroOrNegativeHeight } // Update the light client if we're behind. @@ -253,13 +257,13 @@ func (c *Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.Re return res, nil } -func (c *Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c *Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.next.Health(ctx) } // BlockchainInfo calls rpcclient#BlockchainInfo and then verifies every header // returned. -func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll res, err := c.next.BlockchainInfo(ctx, minHeight, maxHeight) if err != nil { return nil, err @@ -298,16 +302,16 @@ func (c *Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) return res, nil } -func (c *Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c *Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { return c.next.Genesis(ctx) } -func (c *Client) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { +func (c *Client) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { return c.next.GenesisChunked(ctx, id) } // Block calls rpcclient#Block and then verifies the result. -func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { res, err := c.next.Block(ctx, height) if err != nil { return nil, err @@ -341,7 +345,7 @@ func (c *Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, } // BlockByHash calls rpcclient#BlockByHash and then verifies the result. -func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (c *Client) BlockByHash(ctx context.Context, hash tmbytes.HexBytes) (*coretypes.ResultBlock, error) { res, err := c.next.BlockByHash(ctx, hash) if err != nil { return nil, err @@ -376,7 +380,7 @@ func (c *Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBl // BlockResults returns the block results for the given height. If no height is // provided, the results of the block preceding the latest are returned. -func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { +func (c *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { var h int64 if height == nil { res, err := c.next.Status(ctx) @@ -397,7 +401,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul // Validate res. if res.Height <= 0 { - return nil, ctypes.ErrZeroOrNegativeHeight + return nil, coretypes.ErrZeroOrNegativeHeight } // Update the light client if we're behind. 
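+// Editorial sketch of the recurring pattern in this file (names taken from the
+// surrounding diff; nothing new is introduced): proxy the query to the full
+// node, then verify the result against a verified light block before returning:
+//
+//	res, err := c.next.Block(ctx, height)                // 1. query the full node
+//	l, err := c.updateLightClientIfNeededTo(ctx, height) // 2. verified light block
+//	// 3. check res against l (hashes, proofs) and only then return res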
@@ -438,7 +442,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul return res, nil } -func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { // Update the light client if we're behind and retrieve the light block at the requested height // or at the latest height if no height is provided. l, err := c.updateLightClientIfNeededTo(ctx, height) @@ -446,7 +450,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi return nil, err } - return &ctypes.ResultCommit{ + return &coretypes.ResultCommit{ SignedHeader: *l.SignedHeader, CanonicalCommit: true, }, nil @@ -454,7 +458,7 @@ func (c *Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommi // Tx calls rpcclient#Tx method and then verifies the proof if such was // requested. -func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (c *Client) Tx(ctx context.Context, hash tmbytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { res, err := c.next.Tx(ctx, hash, prove) if err != nil || !prove { return res, err @@ -462,7 +466,7 @@ func (c *Client) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.Resul // Validate res. if res.Height <= 0 { - return nil, ctypes.ErrZeroOrNegativeHeight + return nil, coretypes.ErrZeroOrNegativeHeight } // Update the light client if we're behind. @@ -481,7 +485,7 @@ func (c *Client) TxSearch( prove bool, page, perPage *int, orderBy string, -) (*ctypes.ResultTxSearch, error) { +) (*coretypes.ResultTxSearch, error) { return c.next.TxSearch(ctx, query, prove, page, perPage, orderBy) } @@ -490,7 +494,7 @@ func (c *Client) BlockSearch( query string, page, perPage *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) { +) (*coretypes.ResultBlockSearch, error) { return c.next.BlockSearch(ctx, query, page, perPage, orderBy) } @@ -499,7 +503,7 @@ func (c *Client) Validators( ctx context.Context, height *int64, pagePtr, perPagePtr *int, -) (*ctypes.ResultValidators, error) { +) (*coretypes.ResultValidators, error) { // Update the light client if we're behind and retrieve the light block at the // requested height or at the latest height if no height is provided. @@ -518,19 +522,19 @@ func (c *Client) Validators( skipCount := validateSkipCount(page, perPage) v := l.ValidatorSet.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - return &ctypes.ResultValidators{ + return &coretypes.ResultValidators{ BlockHeight: l.Height, Validators: v, Count: len(v), Total: totalCount}, nil } -func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { return c.next.BroadcastEvidence(ctx, ev) } func (c *Client) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { return c.next.Subscribe(ctx, subscriber, query, outCapacity...) } @@ -565,7 +569,7 @@ func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { // SubscribeWS subscribes for events using the given query and remote address as // a subscriber, but does not verify responses (UNSAFE)! 
// TODO: verify data -func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultSubscribe, error) { +func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultSubscribe, error) { out, err := c.next.Subscribe(context.Background(), ctx.RemoteAddr(), query) if err != nil { return nil, err @@ -588,27 +592,27 @@ func (c *Client) SubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.Resul } }() - return &ctypes.ResultSubscribe{}, nil + return &coretypes.ResultSubscribe{}, nil } // UnsubscribeWS calls original client's Unsubscribe using remote address as a // subscriber. -func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*ctypes.ResultUnsubscribe, error) { +func (c *Client) UnsubscribeWS(ctx *rpctypes.Context, query string) (*coretypes.ResultUnsubscribe, error) { err := c.next.Unsubscribe(context.Background(), ctx.RemoteAddr(), query) if err != nil { return nil, err } - return &ctypes.ResultUnsubscribe{}, nil + return &coretypes.ResultUnsubscribe{}, nil } // UnsubscribeAllWS calls original client's UnsubscribeAll using remote address // as a subscriber. -func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscribe, error) { +func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*coretypes.ResultUnsubscribe, error) { err := c.next.UnsubscribeAll(context.Background(), ctx.RemoteAddr()) if err != nil { return nil, err } - return &ctypes.ResultUnsubscribe{}, nil + return &coretypes.ResultUnsubscribe{}, nil } // XXX: Copied from rpc/core/env.go @@ -620,7 +624,7 @@ const ( func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { if perPage < 1 { - panic(fmt.Errorf("%w (%d)", ctypes.ErrZeroOrNegativePerPage, perPage)) + panic(fmt.Errorf("%w (%d)", coretypes.ErrZeroOrNegativePerPage, perPage)) } if pagePtr == nil { // no page parameter @@ -633,7 +637,7 @@ func validatePage(pagePtr *int, perPage, totalCount int) (int, error) { } page := *pagePtr if page <= 0 || page > pages { - return 1, fmt.Errorf("%w expected range: [1, %d], given %d", ctypes.ErrPageOutOfRange, pages, page) + return 1, fmt.Errorf("%w expected range: [1, %d], given %d", coretypes.ErrPageOutOfRange, pages, page) } return page, nil diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go index b373d5126..7f963eb92 100644 --- a/light/store/db/db_test.go +++ b/light/store/db/db_test.go @@ -7,7 +7,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/crypto" diff --git a/networks/remote/ansible/inventory/digital_ocean.py b/networks/remote/ansible/inventory/digital_ocean.py index 24ba64370..383b329a1 100755 --- a/networks/remote/ansible/inventory/digital_ocean.py +++ b/networks/remote/ansible/inventory/digital_ocean.py @@ -1,6 +1,6 @@ #!/usr/bin/env python -''' +""" DigitalOcean external inventory script ====================================== @@ -22,7 +22,7 @@ found. You can force this script to use the cache with --force-cache. ---- Configuration is read from `digital_ocean.ini`, then from environment variables, -then and command-line arguments. +and then from command-line arguments. Most notably, the DigitalOcean API Token must be specified. 
It can be specified in the INI file or with the following environment variables: @@ -40,6 +40,7 @@ is to use the output of the --env option with export: The following groups are generated from --list: - ID (droplet ID) - NAME (droplet NAME) + - digital_ocean - image_ID - image_NAME - distro_NAME (distribution NAME from image) @@ -73,14 +74,12 @@ For each host, the following variables are registered: ----- ``` -usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] - [--droplets] [--regions] [--images] [--sizes] - [--ssh-keys] [--domains] [--pretty] - [--cache-path CACHE_PATH] - [--cache-max_age CACHE_MAX_AGE] - [--force-cache] - [--refresh-cache] - [--api-token API_TOKEN] +usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] [--droplets] + [--regions] [--images] [--sizes] [--ssh-keys] + [--domains] [--tags] [--pretty] + [--cache-path CACHE_PATH] + [--cache-max_age CACHE_MAX_AGE] [--force-cache] + [--refresh-cache] [--env] [--api-token API_TOKEN] Produce an Ansible Inventory file based on DigitalOcean credentials @@ -91,65 +90,129 @@ optional arguments: --host HOST Get all Ansible inventory variables about a specific Droplet --all List all DigitalOcean information as JSON - --droplets List Droplets as JSON + --droplets, -d List Droplets as JSON --regions List Regions as JSON --images List Images as JSON --sizes List Sizes as JSON --ssh-keys List SSH keys as JSON --domains List Domains as JSON + --tags List Tags as JSON --pretty, -p Pretty-print results --cache-path CACHE_PATH Path to the cache files (default: .) --cache-max_age CACHE_MAX_AGE Maximum age of the cached items (default: 0) --force-cache Only use data from the cache - --refresh-cache Force refresh of cache by making API requests to + --refresh-cache, -r Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files) + --env, -e Display DO_API_TOKEN --api-token API_TOKEN, -a API_TOKEN DigitalOcean API Token ``` -''' +""" # (c) 2013, Evan Wies +# (c) 2017, Ansible Project +# (c) 2017, Abhijeet Kasurde # # Inspired by the EC2 inventory plugin: # https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py # -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see . 
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type ###################################################################### -import os -import sys -import re import argparse -from time import time -import ConfigParser import ast +import os +import re +import requests +import sys +from time import time try: - import json + import ConfigParser except ImportError: - import simplejson as json + import configparser as ConfigParser -try: - from dopy.manager import DoManager -except ImportError as e: - sys.exit("failed=True msg='`dopy` library required for this script'") +import json + + +class DoManager: + def __init__(self, api_token): + self.api_token = api_token + self.api_endpoint = 'https://api.digitalocean.com/v2' + self.headers = {'Authorization': 'Bearer {0}'.format(self.api_token), + 'Content-type': 'application/json'} + self.timeout = 60 + + def _url_builder(self, path): + if path[0] == '/': + path = path[1:] + return '%s/%s' % (self.api_endpoint, path) + + def send(self, url, method='GET', data=None): + url = self._url_builder(url) + data = json.dumps(data) + try: + if method == 'GET': + resp_data = {} + incomplete = True + while incomplete: + resp = requests.get(url, data=data, headers=self.headers, timeout=self.timeout) + json_resp = resp.json() + + for key, value in json_resp.items(): + if isinstance(value, list) and key in resp_data: + resp_data[key] += value + else: + resp_data[key] = value + + try: + url = json_resp['links']['pages']['next'] + except KeyError: + incomplete = False + + except ValueError as e: + sys.exit("Unable to parse result from %s: %s" % (url, e)) + return resp_data + + def all_active_droplets(self): + resp = self.send('droplets/') + return resp['droplets'] + + def all_regions(self): + resp = self.send('regions/') + return resp['regions'] + + def all_images(self, filter_name='global'): + params = {'filter': filter_name} + resp = self.send('images/', data=params) + return resp['images'] + + def sizes(self): + resp = self.send('sizes/') + return resp['sizes'] + + def all_ssh_keys(self): + resp = self.send('account/keys') + return resp['ssh_keys'] + + def all_domains(self): + resp = self.send('domains/') + return resp['domains'] + + def show_droplet(self, droplet_id): + resp = self.send('droplets/%s' % droplet_id) + return resp['droplet'] + + def all_tags(self): + resp = self.send('tags') + return resp['tags'] class DigitalOceanInventory(object): @@ -159,7 +222,7 @@ class DigitalOceanInventory(object): ########################################################################### def __init__(self): - ''' Main execution path ''' + """Main execution path """ # DigitalOceanInventory data self.data = {} # All DigitalOcean data @@ -178,9 +241,9 @@ class DigitalOceanInventory(object): # Verify credentials were set if not hasattr(self, 'api_token'): - sys.stderr.write('''Could not find values for DigitalOcean api_token. -They must be specified via either ini file, command line argument (--api-token), -or environment variables (DO_API_TOKEN)\n''') + msg = 'Could not find values for DigitalOcean api_token. 
They must be specified via either ini file, ' \ + 'command line argument (--api-token), or environment variables (DO_API_TOKEN)\n' + sys.stderr.write(msg) sys.exit(-1) # env command, show DigitalOcean credentials @@ -196,10 +259,10 @@ or environment variables (DO_API_TOKEN)\n''') self.load_from_cache() if len(self.data) == 0: if self.args.force_cache: - sys.stderr.write('''Cache is empty and --force-cache was specified\n''') + sys.stderr.write('Cache is empty and --force-cache was specified\n') sys.exit(-1) - self.manager = DoManager(None, self.api_token, api_version=2) + self.manager = DoManager(self.api_token) # Pick the json_data to print based on the CLI command if self.args.droplets: @@ -220,6 +283,9 @@ or environment variables (DO_API_TOKEN)\n''') elif self.args.domains: self.load_from_digital_ocean('domains') json_data = {'domains': self.data['domains']} + elif self.args.tags: + self.load_from_digital_ocean('tags') + json_data = {'tags': self.data['tags']} elif self.args.all: self.load_from_digital_ocean() json_data = self.data @@ -234,19 +300,19 @@ or environment variables (DO_API_TOKEN)\n''') self.write_to_cache() if self.args.pretty: - print(json.dumps(json_data, sort_keys=True, indent=2)) + print(json.dumps(json_data, indent=2)) else: print(json.dumps(json_data)) - # That's all she wrote... ########################################################################### # Script configuration ########################################################################### def read_settings(self): - ''' Reads the settings from the digital_ocean.ini file ''' - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') + """ Reads the settings from the digital_ocean.ini file """ + config = ConfigParser.ConfigParser() + config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'digital_ocean.ini') + config.read(config_path) # Credentials if config.has_option('digital_ocean', 'api_token'): @@ -267,7 +333,7 @@ or environment variables (DO_API_TOKEN)\n''') self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) def read_environment(self): - ''' Reads the settings from environment variables ''' + """ Reads the settings from environment variables """ # Setup credentials if os.getenv("DO_API_TOKEN"): self.api_token = os.getenv("DO_API_TOKEN") @@ -275,7 +341,7 @@ or environment variables (DO_API_TOKEN)\n''') self.api_token = os.getenv("DO_API_KEY") def read_cli_args(self): - ''' Command line argument processing ''' + """ Command line argument processing """ parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') @@ -288,6 +354,7 @@ or environment variables (DO_API_TOKEN)\n''') parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') parser.add_argument('--domains', action='store_true', help='List Domains as JSON') + parser.add_argument('--tags', action='store_true', help='List Tags as JSON') parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') @@ -309,6 +376,7 @@ or environment variables (DO_API_TOKEN)\n''') if (not self.args.droplets and not self.args.regions and not self.args.images and not self.args.sizes and not self.args.ssh_keys and not self.args.domains and + not 
self.args.tags and not self.args.all and not self.args.host): self.args.list = True @@ -317,7 +385,7 @@ or environment variables (DO_API_TOKEN)\n''') ########################################################################### def load_from_digital_ocean(self, resource=None): - '''Get JSON from DigitalOcean API''' + """Get JSON from DigitalOcean API """ if self.args.force_cache and os.path.isfile(self.cache_filename): return # We always get fresh droplets @@ -333,7 +401,7 @@ or environment variables (DO_API_TOKEN)\n''') self.data['regions'] = self.manager.all_regions() self.cache_refreshed = True if resource == 'images' or resource is None: - self.data['images'] = self.manager.all_images(filter=None) + self.data['images'] = self.manager.all_images() self.cache_refreshed = True if resource == 'sizes' or resource is None: self.data['sizes'] = self.manager.sizes() @@ -344,9 +412,27 @@ or environment variables (DO_API_TOKEN)\n''') if resource == 'domains' or resource is None: self.data['domains'] = self.manager.all_domains() self.cache_refreshed = True + if resource == 'tags' or resource is None: + self.data['tags'] = self.manager.all_tags() + self.cache_refreshed = True + + def add_inventory_group(self, key): + """ Method to create group dict """ + host_dict = {'hosts': [], 'vars': {}} + self.inventory[key] = host_dict + return + + def add_host(self, group, host): + """ Helper method to reduce host duplication """ + if group not in self.inventory: + self.add_inventory_group(group) + + if host not in self.inventory[group]['hosts']: + self.inventory[group]['hosts'].append(host) + return def build_inventory(self): - '''Build Ansible inventory of droplets''' + """ Build Ansible inventory of droplets """ self.inventory = { 'all': { 'hosts': [], @@ -357,52 +443,44 @@ or environment variables (DO_API_TOKEN)\n''') # add all droplets by id and name for droplet in self.data['droplets']: - # when using private_networking, the API reports the private one in "ip_address". 
- if 'private_networking' in droplet['features'] and not self.use_private_network: - for net in droplet['networks']['v4']: - if net['type'] == 'public': - dest = net['ip_address'] - else: - continue - else: - dest = droplet['ip_address'] + for net in droplet['networks']['v4']: + if net['type'] == 'public': + dest = net['ip_address'] + else: + continue self.inventory['all']['hosts'].append(dest) - self.inventory[droplet['id']] = [dest] - self.inventory[droplet['name']] = [dest] + self.add_host(droplet['id'], dest) + + self.add_host(droplet['name'], dest) # groups that are always present - for group in ('region_' + droplet['region']['slug'], + for group in ('digital_ocean', + 'region_' + droplet['region']['slug'], 'image_' + str(droplet['image']['id']), 'size_' + droplet['size']['slug'], - 'distro_' + self.to_safe(droplet['image']['distribution']), + 'distro_' + DigitalOceanInventory.to_safe(droplet['image']['distribution']), 'status_' + droplet['status']): - if group not in self.inventory: - self.inventory[group] = {'hosts': [], 'vars': {}} - self.inventory[group]['hosts'].append(dest) + self.add_host(group, dest) # groups that are not always present for group in (droplet['image']['slug'], droplet['image']['name']): if group: - image = 'image_' + self.to_safe(group) - if image not in self.inventory: - self.inventory[image] = {'hosts': [], 'vars': {}} - self.inventory[image]['hosts'].append(dest) + image = 'image_' + DigitalOceanInventory.to_safe(group) + self.add_host(image, dest) if droplet['tags']: for tag in droplet['tags']: - if tag not in self.inventory: - self.inventory[tag] = {'hosts': [], 'vars': {}} - self.inventory[tag]['hosts'].append(dest) + self.add_host(tag, dest) # hostvars info = self.do_namespace(droplet) self.inventory['_meta']['hostvars'][dest] = info def load_droplet_variables_for_host(self): - '''Generate a JSON response to a --host call''' + """ Generate a JSON response to a --host call """ host = int(self.args.host) droplet = self.manager.show_droplet(host) info = self.do_namespace(droplet) @@ -413,7 +491,7 @@ or environment variables (DO_API_TOKEN)\n''') ########################################################################### def is_cache_valid(self): - ''' Determines if the cache files have expired, or if it is still valid ''' + """ Determines if the cache files have expired, or if it is still valid """ if os.path.isfile(self.cache_filename): mod_time = os.path.getmtime(self.cache_filename) current_time = time() @@ -422,11 +500,10 @@ or environment variables (DO_API_TOKEN)\n''') return False def load_from_cache(self): - ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' + """ Reads the data from the cache file and assigns it to member variables as Python Objects """ try: - cache = open(self.cache_filename, 'r') - json_data = cache.read() - cache.close() + with open(self.cache_filename, 'r') as cache: + json_data = cache.read() data = json.loads(json_data) except IOError: data = {'data': {}, 'inventory': {}} @@ -435,31 +512,24 @@ or environment variables (DO_API_TOKEN)\n''') self.inventory = data['inventory'] def write_to_cache(self): - ''' Writes data in JSON format to a file ''' + """ Writes data in JSON format to a file """ data = {'data': self.data, 'inventory': self.inventory} - json_data = json.dumps(data, sort_keys=True, indent=2) + json_data = json.dumps(data, indent=2) - cache = open(self.cache_filename, 'w') - cache.write(json_data) - cache.close() + with open(self.cache_filename, 'w') as cache: + 
cache.write(json_data) ########################################################################### # Utilities ########################################################################### + @staticmethod + def to_safe(word): + """ Converts 'bad' characters in a string to underscores so they can be used as Ansible groups """ + return re.sub(r"[^A-Za-z0-9\-.]", "_", word) - def push(self, my_dict, key, element): - ''' Pushed an element onto an array that may not have been defined in the dict ''' - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - return re.sub("[^A-Za-z0-9\-\.]", "_", word) - - def do_namespace(self, data): - ''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace ''' + @staticmethod + def do_namespace(data): + """ Returns a copy of the dictionary with all the keys put in a 'do_' namespace """ info = {} for k, v in data.items(): info['do_' + k] = v diff --git a/networks/remote/terraform/cluster/main.tf b/networks/remote/terraform/cluster/main.tf index 98ab37cee..15a913b30 100644 --- a/networks/remote/terraform/cluster/main.tf +++ b/networks/remote/terraform/cluster/main.tf @@ -1,3 +1,12 @@ +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = "~> 2.0" + } + } +} + resource "digitalocean_tag" "cluster" { name = "${var.name}" } diff --git a/networks/remote/terraform/cluster/variables.tf b/networks/remote/terraform/cluster/variables.tf index 1b6a70072..0dc66fafe 100644 --- a/networks/remote/terraform/cluster/variables.tf +++ b/networks/remote/terraform/cluster/variables.tf @@ -4,13 +4,13 @@ variable "name" { variable "regions" { description = "Regions to launch in" - type = "list" + type = list default = ["AMS3", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"] } variable "ssh_key" { description = "SSH key filename to copy to the nodes" - type = "string" + type = string } variable "instance_size" { diff --git a/networks/remote/terraform/main.tf b/networks/remote/terraform/main.tf index a768ee13a..470734694 100644 --- a/networks/remote/terraform/main.tf +++ b/networks/remote/terraform/main.tf @@ -1,5 +1,14 @@ #Terraform Configuration +terraform { + required_providers { + digitalocean = { + source = "digitalocean/digitalocean" + version = "~> 2.0" + } + } +} + variable "DO_API_TOKEN" { description = "DigitalOcean Access Token" } @@ -11,7 +20,7 @@ variable "TESTNET_NAME" { variable "SSH_KEY_FILE" { description = "SSH public key file to be used on the nodes" - type = "string" + type = string } variable "SERVERS" { diff --git a/node/node.go b/node/node.go index 11794dda1..c3c0ecb35 100644 --- a/node/node.go +++ b/node/node.go @@ -6,40 +6,39 @@ import ( "fmt" "net" "net/http" - _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port "strconv" "time" - _ "github.com/lib/pq" // provide the psql db driver "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" - cs "github.com/tendermint/tendermint/internal/consensus" - "github.com/tendermint/tendermint/internal/evidence" + 
"github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" + "github.com/tendermint/tendermint/internal/proxy" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" "github.com/tendermint/tendermint/internal/statesync" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/libs/strings" tmtime "github.com/tendermint/tendermint/libs/time" - "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" - "github.com/tendermint/tendermint/proxy" - rpccore "github.com/tendermint/tendermint/rpc/core" - grpccore "github.com/tendermint/tendermint/rpc/grpc" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" + + _ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port + + _ "github.com/lib/pq" // provide the psql db driver ) // nodeImpl is the highest level interface to a full Tendermint node. @@ -48,22 +47,21 @@ type nodeImpl struct { service.BaseService // config - config *cfg.Config + config *config.Config genesisDoc *types.GenesisDoc // initial validator set privValidator types.PrivValidator // local node's validator key // network transport *p2p.MConnTransport - sw *p2p.Switch // p2p connections peerManager *p2p.PeerManager router *p2p.Router - addrBook pex.AddrBook // known peers nodeInfo types.NodeInfo nodeKey types.NodeKey // our node privkey isListening bool // services eventBus *types.EventBus // pub/sub for services + eventSinks []indexer.EventSink stateStore sm.Store blockStore *store.BlockStore // store the blockchain to disk bcReactor service.Service // for block-syncing @@ -71,39 +69,36 @@ type nodeImpl struct { mempool mempool.Mempool stateSync bool // whether the node should state sync on startup stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots - consensusState *cs.State // latest consensus state - consensusReactor *cs.Reactor // for participating in the consensus - pexReactor *pex.Reactor // for exchanging peer addresses - pexReactorV2 *pex.ReactorV2 // for exchanging peer addresses - evidenceReactor *evidence.Reactor - evidencePool *evidence.Pool // tracking evidence - proxyApp proxy.AppConns // connection to the application + consensusReactor *consensus.Reactor // for participating in the consensus + pexReactor service.Service // for exchanging peer addresses + evidenceReactor service.Service rpcListeners []net.Listener // rpc servers - eventSinks []indexer.EventSink - indexerService *indexer.Service + shutdownOps closer + indexerService service.Service + rpcEnv *rpccore.Environment prometheusSrv *http.Server } // newDefaultNode returns a Tendermint node with default settings for the // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. 
-func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, error) { - nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile()) +func newDefaultNode(cfg *config.Config, logger log.Logger) (service.Service, error) { + nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile()) if err != nil { - return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) + return nil, fmt.Errorf("failed to load or gen node key %s: %w", cfg.NodeKeyFile(), err) } - if config.Mode == cfg.ModeSeed { - return makeSeedNode(config, - cfg.DefaultDBProvider, + if cfg.Mode == config.ModeSeed { + return makeSeedNode(cfg, + config.DefaultDBProvider, nodeKey, - defaultGenesisDocProviderFunc(config), + defaultGenesisDocProviderFunc(cfg), logger, ) } var pval *privval.FilePV - if config.Mode == cfg.ModeValidator { - pval, err = privval.LoadOrGenFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) + if cfg.Mode == config.ModeValidator { + pval, err = privval.LoadOrGenFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile()) if err != nil { return nil, err } @@ -111,53 +106,64 @@ func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, err pval = nil } - appClient, _ := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) - return makeNode(config, + appClient, _ := proxy.DefaultClientCreator(cfg.ProxyApp, cfg.ABCI, cfg.DBDir()) + + return makeNode(cfg, pval, nodeKey, appClient, - defaultGenesisDocProviderFunc(config), - cfg.DefaultDBProvider, + defaultGenesisDocProviderFunc(cfg), + config.DefaultDBProvider, logger, ) } // makeNode returns a new, ready to go, Tendermint Node. -func makeNode(config *cfg.Config, +func makeNode(cfg *config.Config, privValidator types.PrivValidator, nodeKey types.NodeKey, - clientCreator proxy.ClientCreator, + clientCreator abciclient.Creator, genesisDocProvider genesisDocProvider, - dbProvider cfg.DBProvider, - logger log.Logger) (service.Service, error) { + dbProvider config.DBProvider, + logger log.Logger, +) (service.Service, error) { + closers := []closer{} - blockStore, stateDB, err := initDBs(config, dbProvider) + blockStore, stateDB, dbCloser, err := initDBs(cfg, dbProvider) if err != nil { - return nil, err + return nil, combineCloseError(err, dbCloser) } + closers = append(closers, dbCloser) + stateStore := sm.NewStore(stateDB) genDoc, err := genesisDocProvider() if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } err = genDoc.ValidateAndComplete() if err != nil { - return nil, fmt.Errorf("error in genesis doc: %w", err) + return nil, combineCloseError( + fmt.Errorf("error in genesis doc: %w", err), + makeCloser(closers)) } // Either load state from a previous run or generate the genesis state from // genesis doc state, err := loadStateFromDBOrGenesisDocProvider(stateStore, genDoc) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) + } + nodeMetrics := defaultMetricsProvider(cfg.Instrumentation)(genDoc.ChainID) + // Create the proxyApp and establish connections to the ABCI app (consensus, mempool, query). 
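A pattern that recurs through the rest of `makeNode`: each resource that gets opened appends a cleanup function to `closers`, and each error return is wrapped with `combineCloseError(err, makeCloser(closers))` so partial initialization never leaks databases or sockets. The helper bodies below are a sketch under assumed semantics; only the names `closer`, `makeCloser`, and `combineCloseError` come from the patch itself:

    package main

    import (
        "errors"
        "fmt"
    )

    type closer func() error

    // makeCloser folds a slice of cleanup functions into one closer.
    func makeCloser(cs []closer) closer {
        return func() error {
            var first error
            for _, c := range cs {
                if err := c(); err != nil && first == nil {
                    first = err // keep the first failure; a real version might aggregate
                }
            }
            return first
        }
    }

    // combineCloseError runs cleanup and attaches any cleanup failure to the
    // original error, preserving both for the caller.
    func combineCloseError(err error, cl closer) error {
        cerr := cl()
        if cerr == nil {
            return err
        }
        if err == nil {
            return cerr
        }
        return fmt.Errorf("%w (cleanup also failed: %v)", err, cerr)
    }

    func main() {
        closers := []closer{func() error { fmt.Println("db closed"); return nil }}
        fmt.Println(combineCloseError(errors.New("init failed"), makeCloser(closers)))
    }
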
- proxyApp, err := createAndStartProxyAppConns(clientCreator, logger) + proxyApp, err := createAndStartProxyAppConns(clientCreator, logger, nodeMetrics.proxy) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) + } // EventBus and IndexerService must be started before the handshake because @@ -166,45 +172,54 @@ func makeNode(config *cfg.Config, // but before it indexed the txs, or, endblocker panicked) eventBus, err := createAndStartEventBus(logger) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) + } - indexerService, eventSinks, err := createAndStartIndexerService(config, dbProvider, eventBus, logger, genDoc.ChainID) + indexerService, eventSinks, err := createAndStartIndexerService(cfg, dbProvider, eventBus, logger, genDoc.ChainID) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) } // If an address is provided, listen on the socket for a connection from an // external signing process. - if config.PrivValidator.ListenAddr != "" { - protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr) + if cfg.PrivValidator.ListenAddr != "" { + protocol, _ := tmnet.ProtocolAndAddress(cfg.PrivValidator.ListenAddr) // FIXME: we should start services inside OnStart switch protocol { case "grpc": - privValidator, err = createAndStartPrivValidatorGRPCClient(config, genDoc.ChainID, logger) + privValidator, err = createAndStartPrivValidatorGRPCClient(cfg, genDoc.ChainID, logger) if err != nil { - return nil, fmt.Errorf("error with private validator grpc client: %w", err) + return nil, combineCloseError( + fmt.Errorf("error with private validator grpc client: %w", err), + makeCloser(closers)) } default: - privValidator, err = createAndStartPrivValidatorSocketClient(config.PrivValidator.ListenAddr, genDoc.ChainID, logger) + privValidator, err = createAndStartPrivValidatorSocketClient(cfg.PrivValidator.ListenAddr, genDoc.ChainID, logger) if err != nil { - return nil, fmt.Errorf("error with private validator socket client: %w", err) + return nil, combineCloseError( + fmt.Errorf("error with private validator socket client: %w", err), + makeCloser(closers)) } } } var pubKey crypto.PubKey - if config.Mode == cfg.ModeValidator { + if cfg.Mode == config.ModeValidator { pubKey, err = privValidator.GetPubKey(context.TODO()) if err != nil { - return nil, fmt.Errorf("can't get pubkey: %w", err) + return nil, combineCloseError(fmt.Errorf("can't get pubkey: %w", err), + makeCloser(closers)) + } if pubKey == nil { - return nil, errors.New("could not retrieve public key from private validator") + return nil, combineCloseError( + errors.New("could not retrieve public key from private validator"), + makeCloser(closers)) } } // Determine whether we should attempt state sync. - stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) + stateSync := cfg.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) if stateSync && state.LastBlockHeight > 0 { logger.Info("Found local state with non-zero height, skipping state sync") stateSync = false @@ -212,42 +227,50 @@ func makeNode(config *cfg.Config, // Determine whether we should do block sync. This must happen after the handshake, since the // app may modify the validator set, specifying ourself as the only validator. 
- blockSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) + blockSync := cfg.BlockSync.Enable && !onlyValidatorIsUs(state, pubKey) + + logNodeStartupInfo(state, pubKey, logger, cfg.Mode) // TODO: Fetch and provide real options and do proper p2p bootstrapping. - nodeInfo, err := makeNodeInfo(config, nodeKey, eventSinks, genDoc, state) + nodeInfo, err := makeNodeInfo(cfg, nodeKey, eventSinks, genDoc, state) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) + } p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, config) + transport := createTransport(p2pLogger, cfg) - peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID) + peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeKey.ID) + closers = append(closers, peerCloser) if err != nil { - return nil, fmt.Errorf("failed to create peer manager: %w", err) + return nil, combineCloseError( + fmt.Errorf("failed to create peer manager: %w", err), + makeCloser(closers)) } - csMetrics, p2pMetrics, memplMetrics, smMetrics := defaultMetricsProvider(config.Instrumentation)(genDoc.ChainID) - - router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey, - peerManager, transport, getRouterConfig(config, proxyApp)) + router, err := createRouter(p2pLogger, nodeMetrics.p2p, nodeInfo, nodeKey.PrivKey, + peerManager, transport, getRouterConfig(cfg, proxyApp)) if err != nil { - return nil, fmt.Errorf("failed to create router: %w", err) + return nil, combineCloseError( + fmt.Errorf("failed to create router: %w", err), + makeCloser(closers)) } - mpReactorShim, mpReactor, mp, err := createMempoolReactor( - config, proxyApp, state, memplMetrics, peerManager, router, logger, + mpReactor, mp, err := createMempoolReactor( + cfg, proxyApp, state, nodeMetrics.mempool, peerManager, router, logger, ) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) + } - evReactorShim, evReactor, evPool, err := createEvidenceReactor( - config, dbProvider, stateDB, blockStore, peerManager, router, logger, + evReactor, evPool, err := createEvidenceReactor( + cfg, dbProvider, stateDB, blockStore, peerManager, router, logger, ) if err != nil { - return nil, err + return nil, combineCloseError(err, makeCloser(closers)) + } // make block executor for consensus and blockchain reactors to execute blocks @@ -258,88 +281,75 @@ func makeNode(config *cfg.Config, mp, evPool, blockStore, - sm.BlockExecutorWithMetrics(smMetrics), + sm.BlockExecutorWithMetrics(nodeMetrics.state), ) consensusLogger := logger.With("module", "consensus") - csReactorShim, csReactor, csState := createConsensusReactor( - config, state, blockExec, blockStore, mp, evPool, - privValidator, csMetrics, stateSync || blockSync, eventBus, + csReactor, csState, err := createConsensusReactor( + cfg, state, blockExec, blockStore, mp, evPool, + privValidator, nodeMetrics.consensus, stateSync || blockSync, eventBus, peerManager, router, consensusLogger, ) + if err != nil { + return nil, combineCloseError(err, makeCloser(closers)) + } // Create the blockchain reactor. Note, we do not start block sync if we're // doing a state sync first. 
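`sm.BlockExecutorWithMetrics` above is an instance of Go's functional-options idiom: the constructor takes zero or more option functions and applies each to the value it is building, so metrics stay optional with a no-op default. A self-contained sketch of the idiom (the types here are stand-ins, not the real `state` package):

    package main

    import "fmt"

    type Metrics struct{ name string }

    type BlockExecutor struct{ metrics *Metrics }

    // BlockExecutorOption mutates the executor during construction.
    type BlockExecutorOption func(*BlockExecutor)

    func BlockExecutorWithMetrics(m *Metrics) BlockExecutorOption {
        return func(e *BlockExecutor) { e.metrics = m }
    }

    func NewBlockExecutor(opts ...BlockExecutorOption) *BlockExecutor {
        e := &BlockExecutor{metrics: &Metrics{name: "nop"}} // no-op default
        for _, opt := range opts {
            opt(e)
        }
        return e
    }

    func main() {
        e := NewBlockExecutor(BlockExecutorWithMetrics(&Metrics{name: "prometheus"}))
        fmt.Println(e.metrics.name) // prometheus
    }
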
- bcReactorShim, bcReactor, err := createBlockchainReactor( - logger, config, state, blockExec, blockStore, csReactor, - peerManager, router, blockSync && !stateSync, csMetrics, + bcReactor, err := createBlockchainReactor( + logger, state, blockExec, blockStore, csReactor, + peerManager, router, blockSync && !stateSync, nodeMetrics.consensus, ) if err != nil { - return nil, fmt.Errorf("could not create blockchain reactor: %w", err) - } - - // TODO: Remove this once the switch is removed. - var bcReactorForSwitch p2p.Reactor - if bcReactorShim != nil { - bcReactorForSwitch = bcReactorShim - } else { - bcReactorForSwitch = bcReactor.(p2p.Reactor) + return nil, combineCloseError( + fmt.Errorf("could not create blockchain reactor: %w", err), + makeCloser(closers)) } // Make ConsensusReactor. Don't enable fully if doing a state sync and/or block sync first. // FIXME We need to update metrics here, since other reactors don't have access to them. if stateSync { - csMetrics.StateSyncing.Set(1) + nodeMetrics.consensus.StateSyncing.Set(1) } else if blockSync { - csMetrics.BlockSyncing.Set(1) + nodeMetrics.consensus.BlockSyncing.Set(1) } // Set up state sync reactor, and schedule a sync if requested. // FIXME The way we do phased startups (e.g. replay -> block sync -> consensus) is very messy, // we should clean this whole thing up. See: // https://github.com/tendermint/tendermint/issues/4644 - var ( - stateSyncReactor *statesync.Reactor - stateSyncReactorShim *p2p.ReactorShim + ssLogger := logger.With("module", "statesync") + ssChDesc := statesync.GetChannelDescriptors() + channels := make(map[p2p.ChannelID]*p2p.Channel, len(ssChDesc)) + for idx := range ssChDesc { + chd := ssChDesc[idx] + ch, err := router.OpenChannel(chd) + if err != nil { + return nil, err + } - channels map[p2p.ChannelID]*p2p.Channel - peerUpdates *p2p.PeerUpdates - ) - - stateSyncReactorShim = p2p.NewReactorShim(logger.With("module", "statesync"), "StateSyncShim", statesync.ChannelShims) - - if config.P2P.DisableLegacy { - channels = makeChannelsFromShims(router, statesync.ChannelShims) - peerUpdates = peerManager.Subscribe() - } else { - channels = getChannelsFromShim(stateSyncReactorShim) - peerUpdates = stateSyncReactorShim.PeerUpdates + channels[ch.ID] = ch } - stateSyncReactor = statesync.NewReactor( - *config.StateSync, - stateSyncReactorShim.Logger, + peerUpdates := peerManager.Subscribe() + stateSyncReactor := statesync.NewReactor( + genDoc.ChainID, + genDoc.InitialHeight, + *cfg.StateSync, + ssLogger, proxyApp.Snapshot(), proxyApp.Query(), channels[statesync.SnapshotChannel], channels[statesync.ChunkChannel], channels[statesync.LightBlockChannel], + channels[statesync.ParamsChannel], peerUpdates, stateStore, blockStore, - config.StateSync.TempDir, + cfg.StateSync.TempDir, + nodeMetrics.statesync, ) - // add the channel descriptors to the transport - // FIXME: This should be removed when the legacy p2p stack is removed and - // transports can either be agnostic to channel descriptors or can be - // declared in the constructor. - transport.AddChannelDescriptors(mpReactorShim.GetChannels()) - transport.AddChannelDescriptors(bcReactorForSwitch.GetChannels()) - transport.AddChannelDescriptors(csReactorShim.GetChannels()) - transport.AddChannelDescriptors(evReactorShim.GetChannels()) - transport.AddChannelDescriptors(stateSyncReactorShim.GetChannels()) - // Optionally, start the pex reactor // // TODO: @@ -353,66 +363,29 @@ func makeNode(config *cfg.Config, // If PEX is on, it should handle dialing the seeds. 
Otherwise the switch does it. // Note we currently use the addrBook regardless at least for AddOurAddress - var ( - pexReactor *pex.Reactor - pexReactorV2 *pex.ReactorV2 - sw *p2p.Switch - addrBook pex.AddrBook - ) + var pexReactor service.Service - pexCh := pex.ChannelDescriptor() - transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) + pexReactor, err = createPEXReactor(logger, peerManager, router) + if err != nil { + return nil, combineCloseError(err, makeCloser(closers)) - if config.P2P.PexReactor { - if config.P2P.DisableLegacy { - addrBook = nil - pexReactorV2, err = createPEXReactorV2(config, logger, peerManager, router) - if err != nil { - return nil, err - } - } else { - // setup Transport and Switch - sw = createSwitch( - config, transport, p2pMetrics, mpReactorShim, bcReactorForSwitch, - stateSyncReactorShim, csReactorShim, evReactorShim, proxyApp, nodeInfo, nodeKey, p2pLogger, - ) - - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peers from persistent-peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err = createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) - } } - if config.RPC.PprofListenAddress != "" { + if cfg.RPC.PprofListenAddress != "" { go func() { - logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) + logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress) + logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil)) }() } node := &nodeImpl{ - config: config, + config: cfg, genesisDoc: genDoc, privValidator: privValidator, transport: transport, - sw: sw, peerManager: peerManager, router: router, - addrBook: addrBook, nodeInfo: nodeInfo, nodeKey: nodeKey, @@ -421,27 +394,50 @@ func makeNode(config *cfg.Config, bcReactor: bcReactor, mempoolReactor: mpReactor, mempool: mp, - consensusState: csState, consensusReactor: csReactor, stateSyncReactor: stateSyncReactor, stateSync: stateSync, pexReactor: pexReactor, - pexReactorV2: pexReactorV2, evidenceReactor: evReactor, - evidencePool: evPool, - proxyApp: proxyApp, indexerService: indexerService, eventBus: eventBus, eventSinks: eventSinks, + + shutdownOps: makeCloser(closers), + + rpcEnv: &rpccore.Environment{ + ProxyAppQuery: proxyApp.Query(), + ProxyAppMempool: proxyApp.Mempool(), + + StateStore: stateStore, + BlockStore: blockStore, + EvidencePool: evPool, + ConsensusState: csState, + + ConsensusReactor: csReactor, + BlockSyncReactor: bcReactor.(consensus.BlockSyncReactor), + + PeerManager: peerManager, + + GenDoc: genDoc, + EventSinks: eventSinks, + EventBus: eventBus, + Mempool: mp, + Logger: logger.With("module", "rpc"), + Config: *cfg.RPC, + }, } + + node.rpcEnv.P2PTransport = node + node.BaseService = *service.NewBaseService(logger, "Node", node) return node, nil } // makeSeedNode returns a new seed node, containing only p2p, pex reactor -func makeSeedNode(config *cfg.Config, - dbProvider cfg.DBProvider, +func makeSeedNode(cfg *config.Config, + 
dbProvider config.DBProvider, nodeKey types.NodeKey, genesisDocProvider genesisDocProvider, logger log.Logger, @@ -455,89 +451,62 @@ func makeSeedNode(config *cfg.Config, state, err := sm.MakeGenesisState(genDoc) if err != nil { return nil, err + } - nodeInfo, err := makeSeedNodeInfo(config, nodeKey, genDoc, state) + nodeInfo, err := makeSeedNodeInfo(cfg, nodeKey, genDoc, state) if err != nil { return nil, err } // Setup Transport and Switch. - p2pMetrics := p2p.PrometheusMetrics(config.Instrumentation.Namespace, "chain_id", genDoc.ChainID) + p2pMetrics := p2p.PrometheusMetrics(cfg.Instrumentation.Namespace, "chain_id", genDoc.ChainID) p2pLogger := logger.With("module", "p2p") - transport := createTransport(p2pLogger, config) - sw := createSwitch( - config, transport, p2pMetrics, nil, nil, - nil, nil, nil, nil, nodeInfo, nodeKey, p2pLogger, - ) + transport := createTransport(p2pLogger, cfg) - err = sw.AddPersistentPeers(strings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) + peerManager, closer, err := createPeerManager(cfg, dbProvider, nodeKey.ID) if err != nil { - return nil, fmt.Errorf("could not add peers from persistent_peers field: %w", err) - } - - err = sw.AddUnconditionalPeerIDs(strings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")) - if err != nil { - return nil, fmt.Errorf("could not add peer ids from unconditional_peer_ids field: %w", err) - } - - addrBook, err := createAddrBookAndSetOnSwitch(config, sw, p2pLogger, nodeKey) - if err != nil { - return nil, fmt.Errorf("could not create addrbook: %w", err) - } - - peerManager, err := createPeerManager(config, dbProvider, p2pLogger, nodeKey.ID) - if err != nil { - return nil, fmt.Errorf("failed to create peer manager: %w", err) + return nil, combineCloseError( + fmt.Errorf("failed to create peer manager: %w", err), + closer) } router, err := createRouter(p2pLogger, p2pMetrics, nodeInfo, nodeKey.PrivKey, - peerManager, transport, getRouterConfig(config, nil)) + peerManager, transport, getRouterConfig(cfg, nil)) if err != nil { - return nil, fmt.Errorf("failed to create router: %w", err) + return nil, combineCloseError( + fmt.Errorf("failed to create router: %w", err), + closer) } - var ( - pexReactor *pex.Reactor - pexReactorV2 *pex.ReactorV2 - ) + var pexReactor service.Service + + pexReactor, err = createPEXReactor(logger, peerManager, router) + if err != nil { + return nil, combineCloseError(err, closer) - // add the pex reactor - // FIXME: we add channel descriptors to both the router and the transport but only the router - // should be aware of channel info. We should remove this from transport once the legacy - // p2p stack is removed. 
- pexCh := pex.ChannelDescriptor() - transport.AddChannelDescriptors([]*p2p.ChannelDescriptor{&pexCh}) - if config.P2P.DisableLegacy { - pexReactorV2, err = createPEXReactorV2(config, logger, peerManager, router) - if err != nil { - return nil, err - } - } else { - pexReactor = createPEXReactorAndAddToSwitch(addrBook, config, sw, logger) } - if config.RPC.PprofListenAddress != "" { + if cfg.RPC.PprofListenAddress != "" { go func() { - logger.Info("Starting pprof server", "laddr", config.RPC.PprofListenAddress) - logger.Error("pprof server error", "err", http.ListenAndServe(config.RPC.PprofListenAddress, nil)) + logger.Info("Starting pprof server", "laddr", cfg.RPC.PprofListenAddress) + logger.Error("pprof server error", "err", http.ListenAndServe(cfg.RPC.PprofListenAddress, nil)) }() } node := &nodeImpl{ - config: config, + config: cfg, genesisDoc: genDoc, transport: transport, - sw: sw, - addrBook: addrBook, nodeInfo: nodeInfo, nodeKey: nodeKey, peerManager: peerManager, router: router, - pexReactor: pexReactor, - pexReactorV2: pexReactorV2, + shutdownOps: closer, + + pexReactor: pexReactor, } node.BaseService = *service.NewBaseService(logger, "SeedNode", node) @@ -580,7 +549,7 @@ func (n *nodeImpl) OnStart() error { // Start the RPC server before the P2P server // so we can eg. receive txs for the first block - if n.config.RPC.ListenAddress != "" && n.config.Mode != cfg.ModeSeed { + if n.config.RPC.ListenAddress != "" && n.config.Mode != config.ModeSeed { listeners, err := n.startRPC() if err != nil { return err @@ -595,45 +564,23 @@ func (n *nodeImpl) OnStart() error { } // Start the transport. - addr, err := types.NewNetAddressString(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) + ep, err := p2p.NewEndpoint(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) if err != nil { return err } - if err := n.transport.Listen(p2p.NewEndpoint(addr)); err != nil { + if err := n.transport.Listen(ep); err != nil { return err } // Start the P2P layer n.isListening = true - n.Logger.Info("p2p service", "legacy_enabled", !n.config.P2P.DisableLegacy) - if n.config.P2P.DisableLegacy { - if err := n.router.Start(); err != nil { - return err - } - } else { - // Add private IDs to addrbook to block those peers being added - n.addrBook.AddPrivateIDs(strings.SplitAndTrimEmpty(n.config.P2P.PrivatePeerIDs, ",", " ")) - if err := n.sw.Start(); err != nil { - return err - } - // Always connect to persistent peers - err = n.sw.DialPeersAsync(strings.SplitAndTrimEmpty(n.config.P2P.PersistentPeers, ",", " ")) - if err != nil { - return fmt.Errorf("could not dial peers from persistent-peers field: %w", err) - } + if err = n.router.Start(); err != nil { + return err } - // Start the Peer Exhange reactor so we can discover new peers - if n.config.P2P.DisableLegacy && n.pexReactorV2 != nil { - if err := n.pexReactorV2.Start(); err != nil { - return err - } - } - - if n.config.Mode != cfg.ModeSeed { - if n.config.BlockSync.Version == cfg.BlockSyncV0 { - // Start the real blockchain reactor separately since the switch uses the shim. 
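The pprof stanza above (repeated in both `makeNode` and `makeSeedNode`) relies on a stdlib side effect: blank-importing `net/http/pprof` registers the `/debug/pprof` handlers on `http.DefaultServeMux`, which is why serving with a `nil` handler exposes them. A runnable reduction of that guarded goroutine, with a placeholder address standing in for `cfg.RPC.PprofListenAddress`:

    package main

    import (
        "log"
        "net/http"

        // Blank import for its side effect: registers /debug/pprof handlers
        // on http.DefaultServeMux.
        _ "net/http/pprof"
    )

    func main() {
        pprofAddr := "localhost:6060" // stand-in for cfg.RPC.PprofListenAddress
        if pprofAddr != "" {
            go func() {
                log.Printf("Starting pprof server on %s", pprofAddr)
                // nil handler means http.DefaultServeMux, where pprof lives.
                log.Printf("pprof server error: %v", http.ListenAndServe(pprofAddr, nil))
            }()
        }
        select {} // block so the sketch keeps serving
    }
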
+ if n.config.Mode != config.ModeSeed { + if n.config.BlockSync.Enable { if err := n.bcReactor.Start(); err != nil { return err } @@ -660,9 +607,15 @@ func (n *nodeImpl) OnStart() error { } } + if err := n.pexReactor.Start(); err != nil { + return err + } + // Run state sync + // TODO: We shouldn't run state sync if we already have state that has a + // LastBlockHeight that is not InitialHeight if n.stateSync { - bcR, ok := n.bcReactor.(cs.BlockSyncReactor) + bcR, ok := n.bcReactor.(consensus.BlockSyncReactor) if !ok { return fmt.Errorf("this blockchain reactor does not support switching from state sync") } @@ -673,17 +626,56 @@ func (n *nodeImpl) OnStart() error { return fmt.Errorf("unable to derive state: %w", err) } - ssc := n.config.StateSync - sp, err := constructStateProvider(ssc, state, n.Logger.With("module", "light")) - - if err != nil { - return fmt.Errorf("failed to set up light client state provider: %w", err) + // TODO: we may want to move these events within the respective + // reactors. + // At the beginning of state sync, we use the initial height as the event height + // because state sync does not know the concrete state height until the snapshot has been fetched. + d := types.EventDataStateSyncStatus{Complete: false, Height: state.InitialHeight} + if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil { + n.eventBus.Logger.Error("failed to emit the statesync start event", "err", err) } - if err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, sp, - ssc, n.config.FastSyncMode, state.InitialHeight, n.eventBus); err != nil { - return fmt.Errorf("failed to start state sync: %w", err) - } + // FIXME: We shouldn't allow state sync to silently error out without + // bubbling up the error and gracefully shutting down the rest of the node + go func() { + n.Logger.Info("starting state sync") + state, err := n.stateSyncReactor.Sync(context.TODO()) + if err != nil { + n.Logger.Error("state sync failed; shutting down this node", "err", err) + // stop the node + if err := n.Stop(); err != nil { + n.Logger.Error("failed to shut down node", "err", err) + } + return + } + + n.consensusReactor.SetStateSyncingMetrics(0) + + d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight} + if err := n.eventBus.PublishEventStateSyncStatus(d); err != nil { + n.eventBus.Logger.Error("failed to emit the statesync completion event", "err", err) + } + + // TODO: Some form of orchestrator is needed here between the state + // advancing reactors to be able to control which one of the three + // is running + if n.config.BlockSync.Enable { + // FIXME Very ugly to have these metrics bleed through here.
+ n.consensusReactor.SetBlockSyncingMetrics(1) + if err := bcR.SwitchToBlockSync(state); err != nil { + n.Logger.Error("failed to switch to block sync", "err", err) + return + } + + d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} + if err := n.eventBus.PublishEventBlockSyncStatus(d); err != nil { + n.eventBus.Logger.Error("failed to emit the block sync starting event", "err", err) + } + + } else { + n.consensusReactor.SwitchToConsensus(state, true) + } + }() } return nil @@ -694,17 +686,27 @@ func (n *nodeImpl) OnStop() { n.Logger.Info("Stopping Node") - // first stop the non-reactor services - if err := n.eventBus.Stop(); err != nil { - n.Logger.Error("Error closing eventBus", "err", err) + if n.eventBus != nil { + // first stop the non-reactor services + if err := n.eventBus.Stop(); err != nil { + n.Logger.Error("Error closing eventBus", "err", err) + } } - if err := n.indexerService.Stop(); err != nil { - n.Logger.Error("Error closing indexerService", "err", err) + if n.indexerService != nil { + if err := n.indexerService.Stop(); err != nil { + n.Logger.Error("Error closing indexerService", "err", err) + } } - if n.config.Mode != cfg.ModeSeed { + for _, es := range n.eventSinks { + if err := es.Stop(); err != nil { + n.Logger.Error("failed to stop event sink", "err", err) + } + } + + if n.config.Mode != config.ModeSeed { // now stop the reactors - if n.config.BlockSync.Version == cfg.BlockSyncV0 { + if n.config.BlockSync.Enable { // Stop the real blockchain reactor separately since the switch uses the shim. if err := n.bcReactor.Stop(); err != nil { n.Logger.Error("failed to stop the blockchain reactor", "err", err) @@ -732,20 +734,12 @@ func (n *nodeImpl) OnStop() { } } - if n.config.P2P.DisableLegacy && n.pexReactorV2 != nil { - if err := n.pexReactorV2.Stop(); err != nil { - n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) - } + if err := n.pexReactor.Stop(); err != nil { + n.Logger.Error("failed to stop the PEX v2 reactor", "err", err) } - if n.config.P2P.DisableLegacy { - if err := n.router.Stop(); err != nil { - n.Logger.Error("failed to stop router", "err", err) - } - } else { - if err := n.sw.Stop(); err != nil { - n.Logger.Error("failed to stop switch", "err", err) - } + if err := n.router.Stop(); err != nil { + n.Logger.Error("failed to stop router", "err", err) } if err := n.transport.Close(); err != nil { @@ -773,69 +767,41 @@ func (n *nodeImpl) OnStop() { // Error from closing listeners, or context timeout: n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) } + + } + if err := n.shutdownOps(); err != nil { + n.Logger.Error("problem shutting down additional services", "err", err) } } -// ConfigureRPC makes sure RPC has all the objects it needs to operate. 
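The nil checks introduced in `OnStop` above follow from the new construction path: since `makeNode` can now fail partway through and delegate cleanup to `shutdownOps`, a node value may reach shutdown with some services never assigned. A small sketch of that defensive shape (all types are stand-ins):

    package main

    import "fmt"

    type stopper interface{ Stop() error }

    type bus struct{}

    func (*bus) Stop() error { return nil }

    type node struct {
        eventBus       *bus
        indexerService stopper // may be nil if construction failed early
    }

    // onStop mirrors the nil-guarded shutdown above: it must tolerate
    // fields that were never assigned.
    func (n *node) onStop() {
        if n.eventBus != nil {
            if err := n.eventBus.Stop(); err != nil {
                fmt.Println("error closing eventBus:", err)
            }
        }
        if n.indexerService != nil {
            if err := n.indexerService.Stop(); err != nil {
                fmt.Println("error closing indexerService:", err)
            }
        }
    }

    func main() {
        (&node{}).onStop() // safe even with nothing initialized
    }
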
-func (n *nodeImpl) ConfigureRPC() (*rpccore.Environment, error) { - rpcCoreEnv := rpccore.Environment{ - ProxyAppQuery: n.proxyApp.Query(), - ProxyAppMempool: n.proxyApp.Mempool(), - - StateStore: n.stateStore, - BlockStore: n.blockStore, - EvidencePool: n.evidencePool, - ConsensusState: n.consensusState, - P2PPeers: n.sw, - P2PTransport: n, - - GenDoc: n.genesisDoc, - EventSinks: n.eventSinks, - ConsensusReactor: n.consensusReactor, - EventBus: n.eventBus, - Mempool: n.mempool, - - Logger: n.Logger.With("module", "rpc"), - - Config: *n.config.RPC, - BlockSyncReactor: n.bcReactor.(cs.BlockSyncReactor), - } - if n.config.Mode == cfg.ModeValidator { +func (n *nodeImpl) startRPC() ([]net.Listener, error) { + if n.config.Mode == config.ModeValidator { pubKey, err := n.privValidator.GetPubKey(context.TODO()) if pubKey == nil || err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } - rpcCoreEnv.PubKey = pubKey + n.rpcEnv.PubKey = pubKey } - if err := rpcCoreEnv.InitGenesisChunks(); err != nil { - return nil, err - } - - return &rpcCoreEnv, nil -} - -func (n *nodeImpl) startRPC() ([]net.Listener, error) { - env, err := n.ConfigureRPC() - if err != nil { + if err := n.rpcEnv.InitGenesisChunks(); err != nil { return nil, err } listenAddrs := strings.SplitAndTrimEmpty(n.config.RPC.ListenAddress, ",", " ") - routes := env.GetRoutes() + routes := n.rpcEnv.GetRoutes() if n.config.RPC.Unsafe { - env.AddUnsafe(routes) + n.rpcEnv.AddUnsafe(routes) } - config := rpcserver.DefaultConfig() - config.MaxBodyBytes = n.config.RPC.MaxBodyBytes - config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - config.MaxOpenConnections = n.config.RPC.MaxOpenConnections + cfg := rpcserver.DefaultConfig() + cfg.MaxBodyBytes = n.config.RPC.MaxBodyBytes + cfg.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes + cfg.MaxOpenConnections = n.config.RPC.MaxOpenConnections // If necessary adjust global WriteTimeout to ensure it's greater than // TimeoutBroadcastTxCommit. 
// See https://github.com/tendermint/tendermint/issues/3435 - if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second + if cfg.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { + cfg.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second } // we may expose the rpc over both a unix and tcp socket @@ -851,14 +817,14 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { wmLogger.Error("Failed to unsubscribe addr from events", "addr", remoteAddr, "err", err) } }), - rpcserver.ReadLimit(config.MaxBodyBytes), + rpcserver.ReadLimit(cfg.MaxBodyBytes), ) wm.SetLogger(wmLogger) mux.HandleFunc("/websocket", wm.WebsocketHandler) rpcserver.RegisterRPCFuncs(mux, routes, rpcLogger) listener, err := rpcserver.Listen( listenAddr, - config, + cfg.MaxOpenConnections, ) if err != nil { return nil, err @@ -881,7 +847,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { n.config.RPC.CertFile(), n.config.RPC.KeyFile(), rpcLogger, - config, + cfg, ); err != nil { n.Logger.Error("Error serving server with TLS", "err", err) } @@ -892,7 +858,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { listener, rootHandler, rpcLogger, - config, + cfg, ); err != nil { n.Logger.Error("Error serving server", "err", err) } @@ -902,35 +868,7 @@ func (n *nodeImpl) startRPC() ([]net.Listener, error) { listeners[i] = listener } - // we expose a simplified api over grpc for convenience to app devs - grpcListenAddr := n.config.RPC.GRPCListenAddress - if grpcListenAddr != "" { - config := rpcserver.DefaultConfig() - config.MaxBodyBytes = n.config.RPC.MaxBodyBytes - config.MaxHeaderBytes = n.config.RPC.MaxHeaderBytes - // NOTE: GRPCMaxOpenConnections is used, not MaxOpenConnections - config.MaxOpenConnections = n.config.RPC.GRPCMaxOpenConnections - // If necessary adjust global WriteTimeout to ensure it's greater than - // TimeoutBroadcastTxCommit. - // See https://github.com/tendermint/tendermint/issues/3435 - if config.WriteTimeout <= n.config.RPC.TimeoutBroadcastTxCommit { - config.WriteTimeout = n.config.RPC.TimeoutBroadcastTxCommit + 1*time.Second - } - listener, err := rpcserver.Listen(grpcListenAddr, config) - if err != nil { - return nil, err - } - go func() { - if err := grpccore.StartGRPCServer(env, listener); err != nil { - n.Logger.Error("Error starting gRPC server", "err", err) - } - }() - listeners = append(listeners, listener) - - } - return listeners, nil - } // startPrometheusServer starts a Prometheus HTTP server, listening for metrics @@ -954,46 +892,16 @@ func (n *nodeImpl) startPrometheusServer(addr string) *http.Server { return srv } -// Switch returns the Node's Switch. -func (n *nodeImpl) Switch() *p2p.Switch { - return n.sw -} - -// BlockStore returns the Node's BlockStore. -func (n *nodeImpl) BlockStore() *store.BlockStore { - return n.blockStore -} - -// ConsensusState returns the Node's ConsensusState. -func (n *nodeImpl) ConsensusState() *cs.State { - return n.consensusState -} - // ConsensusReactor returns the Node's ConsensusReactor. -func (n *nodeImpl) ConsensusReactor() *cs.Reactor { +func (n *nodeImpl) ConsensusReactor() *consensus.Reactor { return n.consensusReactor } -// MempoolReactor returns the Node's mempool reactor. -func (n *nodeImpl) MempoolReactor() service.Service { - return n.mempoolReactor -} - // Mempool returns the Node's mempool. func (n *nodeImpl) Mempool() mempool.Mempool { return n.mempool } -// PEXReactor returns the Node's PEXReactor. 
It returns nil if PEX is disabled. -func (n *nodeImpl) PEXReactor() *pex.Reactor { - return n.pexReactor -} - -// EvidencePool returns the Node's EvidencePool. -func (n *nodeImpl) EvidencePool() *evidence.Pool { - return n.evidencePool -} - // EventBus returns the Node's EventBus. func (n *nodeImpl) EventBus() *types.EventBus { return n.eventBus @@ -1010,19 +918,9 @@ func (n *nodeImpl) GenesisDoc() *types.GenesisDoc { return n.genesisDoc } -// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application. -func (n *nodeImpl) ProxyApp() proxy.AppConns { - return n.proxyApp -} - -// Config returns the Node's config. -func (n *nodeImpl) Config() *cfg.Config { - return n.config -} - -// EventSinks returns the Node's event indexing sinks. -func (n *nodeImpl) EventSinks() []indexer.EventSink { - return n.eventSinks +// RPCEnvironment makes sure RPC has all the objects it needs to operate. +func (n *nodeImpl) RPCEnvironment() *rpccore.Environment { + return n.rpcEnv } //------------------------------------------------------------------------------ @@ -1042,67 +940,6 @@ func (n *nodeImpl) NodeInfo() types.NodeInfo { return n.nodeInfo } -// startStateSync starts an asynchronous state sync process, then switches to block sync mode. -func startStateSync( - ssR statesync.SyncReactor, - bcR cs.BlockSyncReactor, - conR cs.ConsSyncReactor, - sp statesync.StateProvider, - config *cfg.StateSyncConfig, - blockSync bool, - stateInitHeight int64, - eb *types.EventBus, -) error { - stateSyncLogger := eb.Logger.With("module", "statesync") - - stateSyncLogger.Info("starting state sync...") - - // at the beginning of the statesync start, we use the initialHeight as the event height - // because of the statesync doesn't have the concreate state height before fetched the snapshot. - d := types.EventDataStateSyncStatus{Complete: false, Height: stateInitHeight} - if err := eb.PublishEventStateSyncStatus(d); err != nil { - stateSyncLogger.Error("failed to emit the statesync start event", "err", err) - } - - go func() { - state, err := ssR.Sync(context.TODO(), sp, config.DiscoveryTime) - if err != nil { - stateSyncLogger.Error("state sync failed", "err", err) - return - } - - if err := ssR.Backfill(state); err != nil { - stateSyncLogger.Error("backfill failed; node has insufficient history to verify all evidence;"+ - " proceeding optimistically...", "err", err) - } - - conR.SetStateSyncingMetrics(0) - - d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight} - if err := eb.PublishEventStateSyncStatus(d); err != nil { - stateSyncLogger.Error("failed to emit the statesync start event", "err", err) - } - - if blockSync { - // FIXME Very ugly to have these metrics bleed through here. - conR.SetBlockSyncingMetrics(1) - if err := bcR.SwitchToBlockSync(state); err != nil { - stateSyncLogger.Error("failed to switch to block sync", "err", err) - return - } - - d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} - if err := eb.PublishEventBlockSyncStatus(d); err != nil { - stateSyncLogger.Error("failed to emit the block sync starting event", "err", err) - } - - } else { - conR.SwitchToConsensus(state, true) - } - }() - return nil -} - // genesisDocProvider returns a GenesisDoc. // It allows the GenesisDoc to be pulled from sources other than the // filesystem, for instance from a distributed key-value store cluster. 
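Stepping back from the deletions above: the per-dependency accessors (`Switch`, `BlockStore`, `ConsensusState`, `EvidencePool`, `ProxyApp`, `Config`, `EventSinks`, and so on) disappear because everything the RPC layer needs is now injected once into `rpccore.Environment` when the node is built, leaving `RPCEnvironment()` as the single handle. A compressed sketch of the resulting shape, with stand-in field types in place of the real stores and reactors:

    package main

    import "fmt"

    // Environment is a trimmed stand-in for rpccore.Environment: every
    // object the RPC layer needs is injected once at construction time
    // instead of being fetched through one accessor per dependency.
    type Environment struct {
        BlockStore string // stand-ins; the real fields are stores, pools, reactors
        Mempool    string
    }

    type node struct{ rpcEnv *Environment }

    // RPCEnvironment is the single surviving accessor.
    func (n *node) RPCEnvironment() *Environment { return n.rpcEnv }

    func main() {
        n := &node{rpcEnv: &Environment{BlockStore: "bs", Mempool: "mp"}}
        fmt.Println(n.RPCEnvironment().BlockStore)
    }
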
@@ -1110,26 +947,46 @@ type genesisDocProvider func() (*types.GenesisDoc, error) // defaultGenesisDocProviderFunc returns a GenesisDocProvider that loads // the GenesisDoc from the config.GenesisFile() on the filesystem. -func defaultGenesisDocProviderFunc(config *cfg.Config) genesisDocProvider { +func defaultGenesisDocProviderFunc(cfg *config.Config) genesisDocProvider { return func() (*types.GenesisDoc, error) { - return types.GenesisDocFromFile(config.GenesisFile()) + return types.GenesisDocFromFile(cfg.GenesisFile()) } } -// metricsProvider returns a consensus, p2p and mempool Metrics. -type metricsProvider func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempool.Metrics, *sm.Metrics) +type nodeMetrics struct { + consensus *consensus.Metrics + p2p *p2p.Metrics + mempool *mempool.Metrics + state *sm.Metrics + statesync *statesync.Metrics + proxy *proxy.Metrics +} + +// metricsProvider returns consensus, p2p, mempool, state, statesync Metrics. +type metricsProvider func(chainID string) *nodeMetrics // defaultMetricsProvider returns Metrics build using Prometheus client library // if Prometheus is enabled. Otherwise, it returns no-op Metrics. -func defaultMetricsProvider(config *cfg.InstrumentationConfig) metricsProvider { - return func(chainID string) (*cs.Metrics, *p2p.Metrics, *mempool.Metrics, *sm.Metrics) { - if config.Prometheus { - return cs.PrometheusMetrics(config.Namespace, "chain_id", chainID), - p2p.PrometheusMetrics(config.Namespace, "chain_id", chainID), - mempool.PrometheusMetrics(config.Namespace, "chain_id", chainID), - sm.PrometheusMetrics(config.Namespace, "chain_id", chainID) +func defaultMetricsProvider(cfg *config.InstrumentationConfig) metricsProvider { + return func(chainID string) *nodeMetrics { + if cfg.Prometheus { + return &nodeMetrics{ + consensus.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + p2p.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + mempool.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + sm.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + statesync.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + proxy.PrometheusMetrics(cfg.Namespace, "chain_id", chainID), + } + } + return &nodeMetrics{ + consensus.NopMetrics(), + p2p.NopMetrics(), + mempool.NopMetrics(), + sm.NopMetrics(), + statesync.NopMetrics(), + proxy.NopMetrics(), } - return cs.NopMetrics(), p2p.NopMetrics(), mempool.NopMetrics(), sm.NopMetrics() } } @@ -1192,15 +1049,15 @@ func createAndStartPrivValidatorSocketClient( } func createAndStartPrivValidatorGRPCClient( - config *cfg.Config, + cfg *config.Config, chainID string, logger log.Logger, ) (types.PrivValidator, error) { pvsc, err := tmgrpc.DialRemoteSigner( - config.PrivValidator, + cfg.PrivValidator, chainID, logger, - config.Instrumentation.Prometheus, + cfg.Instrumentation.Prometheus, ) if err != nil { return nil, fmt.Errorf("failed to start private validator: %w", err) @@ -1215,15 +1072,11 @@ func createAndStartPrivValidatorGRPCClient( return pvsc, nil } -func getRouterConfig(conf *cfg.Config, proxyApp proxy.AppConns) p2p.RouterOptions { +func getRouterConfig(conf *config.Config, proxyApp proxy.AppConns) p2p.RouterOptions { opts := p2p.RouterOptions{ QueueType: conf.P2P.QueueType, } - if conf.P2P.MaxNumInboundPeers > 0 { - opts.MaxIncomingConnectionAttempts = conf.P2P.MaxIncomingConnectionAttempts - } - if conf.FilterPeers && proxyApp != nil { opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error { res, err := proxyApp.Query().QuerySync(context.Background(), 
abci.RequestQuery{ @@ -1257,52 +1110,3 @@ func getRouterConfig(conf *cfg.Config, proxyApp proxy.AppConns) p2p.RouterOption return opts } - -// FIXME: Temporary helper function, shims should be removed. -func makeChannelsFromShims( - router *p2p.Router, - chShims map[p2p.ChannelID]*p2p.ChannelDescriptorShim, -) map[p2p.ChannelID]*p2p.Channel { - - channels := map[p2p.ChannelID]*p2p.Channel{} - for chID, chShim := range chShims { - ch, err := router.OpenChannel(*chShim.Descriptor, chShim.MsgType, chShim.Descriptor.RecvBufferCapacity) - if err != nil { - panic(fmt.Sprintf("failed to open channel %v: %v", chID, err)) - } - - channels[chID] = ch - } - - return channels -} - -func getChannelsFromShim(reactorShim *p2p.ReactorShim) map[p2p.ChannelID]*p2p.Channel { - channels := map[p2p.ChannelID]*p2p.Channel{} - for chID := range reactorShim.Channels { - channels[chID] = reactorShim.GetChannel(chID) - } - - return channels -} - -func constructStateProvider( - ssc *cfg.StateSyncConfig, - state sm.State, - logger log.Logger, -) (statesync.StateProvider, error) { - ctx, cancel := context.WithTimeout(context.TODO(), 10*time.Second) - defer cancel() - - to := light.TrustOptions{ - Period: ssc.TrustPeriod, - Height: ssc.TrustHeight, - Hash: ssc.TrustHashBytes(), - } - - return statesync.NewLightClientStateProvider( - ctx, - state.ChainID, state.Version, state.InitialHeight, - ssc.RPCServers, to, logger, - ) -} diff --git a/node/node_test.go b/node/node_test.go index 16edb4210..19f27a640 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -13,50 +13,55 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tm-db" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" - consmocks "github.com/tendermint/tendermint/internal/consensus/mocks" - ssmocks "github.com/tendermint/tendermint/internal/statesync/mocks" - "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" - statesync "github.com/tendermint/tendermint/internal/statesync" + "github.com/tendermint/tendermint/internal/proxy" + sm "github.com/tendermint/tendermint/internal/state" + "github.com/tendermint/tendermint/internal/state/indexer" + "github.com/tendermint/tendermint/internal/store" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" ) func TestNodeStartStop(t *testing.T) { - config := cfg.ResetTestRoot("node_node_test") - defer os.RemoveAll(config.RootDir) + cfg := config.ResetTestRoot("node_node_test") + + defer os.RemoveAll(cfg.RootDir) // create & start node - ns, err := newDefaultNode(config, log.TestingLogger()) + ns, err := newDefaultNode(cfg, 
 log.TestingLogger())
 	require.NoError(t, err)
 	require.NoError(t, ns.Start())
+	t.Cleanup(func() {
+		if ns.IsRunning() {
+			assert.NoError(t, ns.Stop())
+			ns.Wait()
+		}
+	})
+
 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)
-	t.Logf("Started node %v", n.sw.NodeInfo())
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 
 	// wait for the node to produce a block
-	blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock)
+	blocksSub, err := n.EventBus().Subscribe(ctx, "node_test", types.EventQueryNewBlock)
 	require.NoError(t, err)
 	select {
 	case <-blocksSub.Out():
@@ -86,38 +91,45 @@ func TestNodeStartStop(t *testing.T) {
 	}
 }
 
-func getTestNode(t *testing.T, conf *cfg.Config, logger log.Logger) *nodeImpl {
+func getTestNode(t *testing.T, conf *config.Config, logger log.Logger) *nodeImpl {
 	t.Helper()
 	ns, err := newDefaultNode(conf, logger)
 	require.NoError(t, err)
 
 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)
+
+	t.Cleanup(func() {
+		if ns.IsRunning() {
+			assert.NoError(t, ns.Stop())
+			ns.Wait()
+		}
+	})
+
 	return n
 }
 
 func TestNodeDelayedStart(t *testing.T) {
-	config := cfg.ResetTestRoot("node_delayed_start_test")
-	defer os.RemoveAll(config.RootDir)
+	cfg := config.ResetTestRoot("node_delayed_start_test")
+	defer os.RemoveAll(cfg.RootDir)
 	now := tmtime.Now()
 
 	// create & start node
-	n := getTestNode(t, config, log.TestingLogger())
+	n := getTestNode(t, cfg, log.TestingLogger())
 	n.GenesisDoc().GenesisTime = now.Add(2 * time.Second)
 
 	require.NoError(t, n.Start())
-	defer n.Stop() //nolint:errcheck // ignore for tests
 
 	startTime := tmtime.Now()
 	assert.Equal(t, true, startTime.After(n.GenesisDoc().GenesisTime))
 }
 
 func TestNodeSetAppVersion(t *testing.T) {
-	config := cfg.ResetTestRoot("node_app_version_test")
-	defer os.RemoveAll(config.RootDir)
+	cfg := config.ResetTestRoot("node_app_version_test")
+	defer os.RemoveAll(cfg.RootDir)
 
 	// create node
-	n := getTestNode(t, config, log.TestingLogger())
+	n := getTestNode(t, cfg, log.TestingLogger())
 
 	// default config uses the kvstore app
 	var appVersion uint64 = kvstore.ProtocolVersion
@@ -134,9 +146,9 @@ func TestNodeSetAppVersion(t *testing.T) {
 func TestNodeSetPrivValTCP(t *testing.T) {
 	addr := "tcp://" + testFreeAddr(t)
 
-	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
-	defer os.RemoveAll(config.RootDir)
-	config.PrivValidator.ListenAddr = addr
+	cfg := config.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(cfg.RootDir)
+	cfg.PrivValidator.ListenAddr = addr
 
 	dialer := privval.DialTCPFn(addr, 100*time.Millisecond, ed25519.GenPrivKey())
 	dialerEndpoint := privval.NewSignerDialerEndpoint(
@@ -147,7 +159,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
 
 	signerServer := privval.NewSignerServer(
 		dialerEndpoint,
-		config.ChainID(),
+		cfg.ChainID(),
 		types.NewMockPV(),
 	)
 
@@ -159,7 +171,7 @@ func TestNodeSetPrivValTCP(t *testing.T) {
 	}()
 	defer signerServer.Stop() //nolint:errcheck // ignore for tests
 
-	n := getTestNode(t, config, log.TestingLogger())
+	n := getTestNode(t, cfg, log.TestingLogger())
 	assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
 }
 
@@ -167,21 +179,26 @@ func TestNodeSetPrivValTCP(t *testing.T) {
 func TestPrivValidatorListenAddrNoProtocol(t *testing.T) {
 	addrNoPrefix := testFreeAddr(t)
 
-	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
-	defer os.RemoveAll(config.RootDir)
-	config.PrivValidator.ListenAddr = addrNoPrefix
+	cfg := config.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(cfg.RootDir)
+	cfg.PrivValidator.ListenAddr = addrNoPrefix
 
-	_, err := newDefaultNode(config, log.TestingLogger())
+	n, err := newDefaultNode(cfg, log.TestingLogger())
 	assert.Error(t, err)
+
+	if n != nil && n.IsRunning() {
+		assert.NoError(t, n.Stop())
+		n.Wait()
+	}
 }
 
 func TestNodeSetPrivValIPC(t *testing.T) {
 	tmpfile := "/tmp/kms." + tmrand.Str(6) + ".sock"
 	defer os.Remove(tmpfile) // clean up
 
-	config := cfg.ResetTestRoot("node_priv_val_tcp_test")
-	defer os.RemoveAll(config.RootDir)
-	config.PrivValidator.ListenAddr = "unix://" + tmpfile
+	cfg := config.ResetTestRoot("node_priv_val_tcp_test")
+	defer os.RemoveAll(cfg.RootDir)
+	cfg.PrivValidator.ListenAddr = "unix://" + tmpfile
 
 	dialer := privval.DialUnixFn(tmpfile)
 	dialerEndpoint := privval.NewSignerDialerEndpoint(
@@ -192,7 +209,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
 
 	pvsc := privval.NewSignerServer(
 		dialerEndpoint,
-		config.ChainID(),
+		cfg.ChainID(),
 		types.NewMockPV(),
 	)
 
@@ -201,7 +218,7 @@ func TestNodeSetPrivValIPC(t *testing.T) {
 		require.NoError(t, err)
 	}()
 	defer pvsc.Stop() //nolint:errcheck // ignore for tests
 
-	n := getTestNode(t, config, log.TestingLogger())
+	n := getTestNode(t, cfg, log.TestingLogger())
 	assert.IsType(t, &privval.RetrySignerClient{}, n.PrivValidator())
 }
 
@@ -217,10 +234,13 @@ func testFreeAddr(t *testing.T) string {
 // create a proposal block using real and full
 // mempool and evidence pool and validate it.
 func TestCreateProposalBlock(t *testing.T) {
-	config := cfg.ResetTestRoot("node_create_proposal")
-	defer os.RemoveAll(config.RootDir)
-	cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
-	proxyApp := proxy.NewAppConns(cc)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cfg := config.ResetTestRoot("node_create_proposal")
+	defer os.RemoveAll(cfg.RootDir)
+	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
+	proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics())
 	err := proxyApp.Start()
 	require.Nil(t, err)
 	defer proxyApp.Stop() //nolint:errcheck // ignore for tests
@@ -228,7 +248,7 @@ func TestCreateProposalBlock(t *testing.T) {
 	logger := log.TestingLogger()
 
 	const height int64 = 1
-	state, stateDB, privVals := state(1, height)
+	state, stateDB, privVals := state(t, 1, height)
 	stateStore := sm.NewStore(stateDB)
 	maxBytes := 16384
 	const partSize uint32 = 256
@@ -238,7 +258,7 @@ func TestCreateProposalBlock(t *testing.T) {
 	proposerAddr, _ := state.Validators.GetByIndex(0)
 
 	mp := mempoolv0.NewCListMempool(
-		config.Mempool,
+		cfg.Mempool,
 		proxyApp.Mempool(),
 		state.LastBlockHeight,
 		mempoolv0.WithMetrics(mempool.NopMetrics()),
@@ -255,7 +275,7 @@ func TestCreateProposalBlock(t *testing.T) {
 
 	// fill the evidence pool with more evidence
 	// than can fit in a block
-	var currentBytes int64 = 0
+	var currentBytes int64
 	for currentBytes <= maxEvidenceBytes {
 		ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain")
 		currentBytes += int64(len(ev.Bytes()))
@@ -272,7 +292,7 @@ func TestCreateProposalBlock(t *testing.T) {
 	txLength := 100
 	for i := 0; i <= maxBytes/txLength; i++ {
 		tx := tmrand.Bytes(txLength)
-		err := mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
+		err := mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
 		assert.NoError(t, err)
 	}
 
@@ -309,10 +329,13 @@ func TestCreateProposalBlock(t *testing.T) {
 }
 
 func TestMaxTxsProposalBlockSize(t *testing.T) {
-	config := cfg.ResetTestRoot("node_create_proposal")
-	defer os.RemoveAll(config.RootDir)
-	cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
-	proxyApp := proxy.NewAppConns(cc)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cfg := config.ResetTestRoot("node_create_proposal")
+	defer os.RemoveAll(cfg.RootDir)
+	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
+	proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics())
 	err := proxyApp.Start()
 	require.Nil(t, err)
 	defer proxyApp.Stop() //nolint:errcheck // ignore for tests
@@ -320,7 +343,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
 	logger := log.TestingLogger()
 
 	const height int64 = 1
-	state, stateDB, _ := state(1, height)
+	state, stateDB, _ := state(t, 1, height)
 	stateStore := sm.NewStore(stateDB)
 	blockStore := store.NewBlockStore(dbm.NewMemDB())
 	const maxBytes int64 = 16384
@@ -330,7 +353,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
 
 	// Make Mempool
 	mp := mempoolv0.NewCListMempool(
-		config.Mempool,
+		cfg.Mempool,
 		proxyApp.Mempool(),
 		state.LastBlockHeight,
 		mempoolv0.WithMetrics(mempool.NopMetrics()),
@@ -342,7 +365,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
 	// fill the mempool with one txs just below the maximum size
 	txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1))
 	tx := tmrand.Bytes(txLength - 4) // to account for the varint
-	err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
+	err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
 	assert.NoError(t, err)
 
 	blockExec := sm.NewBlockExecutor(
@@ -371,17 +394,20 @@ func TestMaxTxsProposalBlockSize(t *testing.T) {
 }
 
 func TestMaxProposalBlockSize(t *testing.T) {
-	config := cfg.ResetTestRoot("node_create_proposal")
-	defer os.RemoveAll(config.RootDir)
-	cc := proxy.NewLocalClientCreator(kvstore.NewApplication())
-	proxyApp := proxy.NewAppConns(cc)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	cfg := config.ResetTestRoot("node_create_proposal")
+	defer os.RemoveAll(cfg.RootDir)
+	cc := abciclient.NewLocalCreator(kvstore.NewApplication())
+	proxyApp := proxy.NewAppConns(cc, proxy.NopMetrics())
 	err := proxyApp.Start()
 	require.Nil(t, err)
 	defer proxyApp.Stop() //nolint:errcheck // ignore for tests
 
 	logger := log.TestingLogger()
 
-	state, stateDB, _ := state(types.MaxVotesCount, int64(1))
+	state, stateDB, _ := state(t, types.MaxVotesCount, int64(1))
 	stateStore := sm.NewStore(stateDB)
 	blockStore := store.NewBlockStore(dbm.NewMemDB())
 	const maxBytes int64 = 1024 * 1024 * 2
@@ -390,7 +416,7 @@ func TestMaxProposalBlockSize(t *testing.T) {
 
 	// Make Mempool
 	mp := mempoolv0.NewCListMempool(
-		config.Mempool,
+		cfg.Mempool,
 		proxyApp.Mempool(),
 		state.LastBlockHeight,
 		mempoolv0.WithMetrics(mempool.NopMetrics()),
@@ -408,7 +434,7 @@ func TestMaxProposalBlockSize(t *testing.T) {
 	// At the end of the test, only the single big tx should be added
 	for i := 0; i < 10; i++ {
 		tx := tmrand.Bytes(10)
-		err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{})
+		err = mp.CheckTx(ctx, tx, nil, mempool.TxInfo{})
 		assert.NoError(t, err)
 	}
 
@@ -486,115 +512,153 @@ func TestMaxProposalBlockSize(t *testing.T) {
 }
 
 func TestNodeNewSeedNode(t *testing.T) {
-	config := cfg.ResetTestRoot("node_new_node_custom_reactors_test")
-	config.Mode = cfg.ModeSeed
-	defer os.RemoveAll(config.RootDir)
+	cfg := config.ResetTestRoot("node_new_node_custom_reactors_test")
+	cfg.Mode = config.ModeSeed
+	defer os.RemoveAll(cfg.RootDir)
 
-	nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile())
+	nodeKey, err := types.LoadOrGenNodeKey(cfg.NodeKeyFile())
 	require.NoError(t, err)
 
-	ns, err := makeSeedNode(config,
-		cfg.DefaultDBProvider,
+	ns, err := makeSeedNode(cfg,
+		config.DefaultDBProvider,
 		nodeKey,
-		defaultGenesisDocProviderFunc(config),
+		defaultGenesisDocProviderFunc(cfg),
 		log.TestingLogger(),
 	)
+	require.NoError(t, err)
 	n, ok := ns.(*nodeImpl)
 	require.True(t, ok)
 
 	err = n.Start()
 	require.NoError(t, err)
-	assert.True(t, n.pexReactor.IsRunning())
+
+	require.NoError(t, n.Stop())
+
 }
 
 func TestNodeSetEventSink(t *testing.T) {
-	config := cfg.ResetTestRoot("node_app_version_test")
-	defer os.RemoveAll(config.RootDir)
+	cfg := config.ResetTestRoot("node_app_version_test")
+	defer os.RemoveAll(cfg.RootDir)
 
-	n := getTestNode(t, config, log.TestingLogger())
+	logger := log.TestingLogger()
+
+	setupTest := func(t *testing.T, conf *config.Config) []indexer.EventSink {
+		eventBus, err := createAndStartEventBus(logger)
+		require.NoError(t, err)
+		t.Cleanup(func() { require.NoError(t, eventBus.Stop()) })
+
+		genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile())
+		require.NoError(t, err)
 
-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.KV, n.eventSinks[0].Type())
+		indexService, eventSinks, err := createAndStartIndexerService(cfg,
+			config.DefaultDBProvider, eventBus, logger, genDoc.ChainID)
+		require.NoError(t, err)
+		t.Cleanup(func() { require.NoError(t, indexService.Stop()) })
+		return eventSinks
+	}
+	cleanup := func(ns service.Service) func() {
+		return func() {
+			n, ok := ns.(*nodeImpl)
+			if !ok {
+				return
+			}
+			if n == nil {
+				return
+			}
+			if !n.IsRunning() {
+				return
+			}
+			assert.NoError(t, n.Stop())
+			n.Wait()
		}
+	}
 
-	config.TxIndex.Indexer = []string{"null"}
-	n = getTestNode(t, config, log.TestingLogger())
+	eventSinks := setupTest(t, cfg)
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.KV, eventSinks[0].Type())
 
-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.NULL, n.eventSinks[0].Type())
+	cfg.TxIndex.Indexer = []string{"null"}
+	eventSinks = setupTest(t, cfg)
 
-	config.TxIndex.Indexer = []string{"null", "kv"}
-	n = getTestNode(t, config, log.TestingLogger())
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.NULL, eventSinks[0].Type())
 
-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.NULL, n.eventSinks[0].Type())
+	cfg.TxIndex.Indexer = []string{"null", "kv"}
+	eventSinks = setupTest(t, cfg)
 
-	config.TxIndex.Indexer = []string{"kvv"}
-	ns, err := newDefaultNode(config, log.TestingLogger())
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.NULL, eventSinks[0].Type())
+
+	cfg.TxIndex.Indexer = []string{"kvv"}
+	ns, err := newDefaultNode(cfg, logger)
 	assert.Nil(t, ns)
-	assert.Equal(t, errors.New("unsupported event sink type"), err)
+	assert.Contains(t, err.Error(), "unsupported event sink type")
+	t.Cleanup(cleanup(ns))
 
-	config.TxIndex.Indexer = []string{}
-	n = getTestNode(t, config, log.TestingLogger())
+	cfg.TxIndex.Indexer = []string{}
+	eventSinks = setupTest(t, cfg)
 
-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.NULL, n.eventSinks[0].Type())
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.NULL, eventSinks[0].Type())
 
-	config.TxIndex.Indexer = []string{"psql"}
-	ns, err = newDefaultNode(config, log.TestingLogger())
+	cfg.TxIndex.Indexer = []string{"psql"}
+	ns, err = newDefaultNode(cfg, logger)
 	assert.Nil(t, ns)
-	assert.Equal(t, errors.New("the psql connection settings cannot be empty"), err)
+	assert.Contains(t, err.Error(), "the psql connection settings cannot be empty")
+	t.Cleanup(cleanup(ns))
 
 	var psqlConn = "test"
 
-	config.TxIndex.Indexer = []string{"psql"}
-	config.TxIndex.PsqlConn = psqlConn
-	n = getTestNode(t, config, log.TestingLogger())
-	assert.Equal(t, 1, len(n.eventSinks))
-	assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type())
-	n.OnStop()
+	cfg.TxIndex.Indexer = []string{"psql"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	eventSinks = setupTest(t, cfg)
 
-	config.TxIndex.Indexer = []string{"psql", "kv"}
-	config.TxIndex.PsqlConn = psqlConn
-	n = getTestNode(t, config, log.TestingLogger())
-	assert.Equal(t, 2, len(n.eventSinks))
+	assert.Equal(t, 1, len(eventSinks))
+	assert.Equal(t, indexer.PSQL, eventSinks[0].Type())
+
+	cfg.TxIndex.Indexer = []string{"psql", "kv"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	eventSinks = setupTest(t, cfg)
+
+	assert.Equal(t, 2, len(eventSinks))
 	// we use map to filter the duplicated sinks, so it's not guarantee the order when append sinks.
-	if n.eventSinks[0].Type() == indexer.KV {
-		assert.Equal(t, indexer.PSQL, n.eventSinks[1].Type())
+	if eventSinks[0].Type() == indexer.KV {
+		assert.Equal(t, indexer.PSQL, eventSinks[1].Type())
 	} else {
-		assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type())
-		assert.Equal(t, indexer.KV, n.eventSinks[1].Type())
+		assert.Equal(t, indexer.PSQL, eventSinks[0].Type())
+		assert.Equal(t, indexer.KV, eventSinks[1].Type())
 	}
-	n.OnStop()
 
-	config.TxIndex.Indexer = []string{"kv", "psql"}
-	config.TxIndex.PsqlConn = psqlConn
-	n = getTestNode(t, config, log.TestingLogger())
-	assert.Equal(t, 2, len(n.eventSinks))
-	if n.eventSinks[0].Type() == indexer.KV {
-		assert.Equal(t, indexer.PSQL, n.eventSinks[1].Type())
+	cfg.TxIndex.Indexer = []string{"kv", "psql"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	eventSinks = setupTest(t, cfg)
+
+	assert.Equal(t, 2, len(eventSinks))
+	if eventSinks[0].Type() == indexer.KV {
+		assert.Equal(t, indexer.PSQL, eventSinks[1].Type())
 	} else {
-		assert.Equal(t, indexer.PSQL, n.eventSinks[0].Type())
-		assert.Equal(t, indexer.KV, n.eventSinks[1].Type())
+		assert.Equal(t, indexer.PSQL, eventSinks[0].Type())
+		assert.Equal(t, indexer.KV, eventSinks[1].Type())
 	}
-	n.OnStop()
 
 	var e = errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
-	config.TxIndex.Indexer = []string{"psql", "kv", "Kv"}
-	config.TxIndex.PsqlConn = psqlConn
-	_, err = newDefaultNode(config, log.TestingLogger())
+	cfg.TxIndex.Indexer = []string{"psql", "kv", "Kv"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	ns, err = newDefaultNode(cfg, logger)
 	require.Error(t, err)
-	assert.Equal(t, e, err)
+	assert.Contains(t, err.Error(), e.Error())
+	t.Cleanup(cleanup(ns))
 
-	config.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
-	config.TxIndex.PsqlConn = psqlConn
-	_, err = newDefaultNode(config, log.TestingLogger())
+	cfg.TxIndex.Indexer = []string{"Psql", "kV", "kv", "pSql"}
+	cfg.TxIndex.PsqlConn = psqlConn
+	ns, err = newDefaultNode(cfg, logger)
 	require.Error(t, err)
-	assert.Equal(t, e, err)
+	assert.Contains(t, err.Error(), e.Error())
+	t.Cleanup(cleanup(ns))
 }
 
-func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
+func state(t *testing.T, nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
+	t.Helper()
 	privVals := make([]types.PrivValidator, nVals)
 	vals := make([]types.GenesisValidator, nVals)
 	for i := 0; i < nVals; i++ {
@@ -615,17 +679,15 @@ func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) {
 
 	// save validators to db for 2 heights
 	stateDB := dbm.NewMemDB()
+	t.Cleanup(func() { require.NoError(t, stateDB.Close()) })
+
 	stateStore := sm.NewStore(stateDB)
-	if err := stateStore.Save(s); err != nil {
-		panic(err)
-	}
+	require.NoError(t, stateStore.Save(s))
 
 	for i := 1; i < int(height); i++ {
 		s.LastBlockHeight++
 		s.LastValidators = s.Validators.Copy()
-		if err := stateStore.Save(s); err != nil {
-			panic(err)
-		}
+		require.NoError(t, stateStore.Save(s))
 	}
 
 	return s, stateDB, privVals
 }
@@ -639,13 +701,13 @@ func loadStatefromGenesis(t *testing.T) sm.State {
 	stateDB := dbm.NewMemDB()
 	stateStore := sm.NewStore(stateDB)
 
-	config := cfg.ResetTestRoot("load_state_from_genesis")
+	cfg := config.ResetTestRoot("load_state_from_genesis")
 
 	loadedState, err := stateStore.Load()
 	require.NoError(t, err)
 	require.True(t, loadedState.IsEmpty())
 
-	genDoc, _ := factory.RandGenesisDoc(config, 0, false, 10)
+	genDoc, _ := factory.RandGenesisDoc(cfg, 0, false, 10)
 
 	state, err := loadStateFromDBOrGenesisDocProvider(
 		stateStore,
@@ -656,65 +718,3 @@ func loadStatefromGenesis(t *testing.T) sm.State {
 
 	return state
 }
-
-func TestNodeStartStateSync(t *testing.T) {
-	mockSSR := &statesync.MockSyncReactor{}
-	mockFSR := &consmocks.BlockSyncReactor{}
-	mockCSR := &consmocks.ConsSyncReactor{}
-	mockSP := &ssmocks.StateProvider{}
-	state := loadStatefromGenesis(t)
-	config := cfg.ResetTestRoot("load_state_from_genesis")
-
-	eventBus, err := createAndStartEventBus(log.TestingLogger())
-	defer func() {
-		err := eventBus.Stop()
-		require.NoError(t, err)
-	}()
-
-	require.NoError(t, err)
-	require.NotNil(t, eventBus)
-
-	sub, err := eventBus.Subscribe(context.Background(), "test-client", types.EventQueryStateSyncStatus, 10)
-	require.NoError(t, err)
-	require.NotNil(t, sub)
-
-	cfgSS := config.StateSync
-
-	mockSSR.On("Sync", context.TODO(), mockSP, cfgSS.DiscoveryTime).Return(state, nil).
-		On("Backfill", state).Return(nil)
-	mockCSR.On("SetStateSyncingMetrics", float64(0)).Return().
-		On("SwitchToConsensus", state, true).Return()
-
-	require.NoError(t,
-		startStateSync(mockSSR, mockFSR, mockCSR, mockSP, config.StateSync, false, state.InitialHeight, eventBus))
-
-	for cnt := 0; cnt < 2; {
-		select {
-		case <-time.After(3 * time.Second):
-			t.Errorf("StateSyncStatus timeout")
-		case msg := <-sub.Out():
-			if cnt == 0 {
-				ensureStateSyncStatus(t, msg, false, state.InitialHeight)
-				cnt++
-			} else {
-				// the state height = 0 because we are not actually update the state in this test
-				ensureStateSyncStatus(t, msg, true, 0)
-				cnt++
-			}
-		}
-	}
-
-	mockSSR.AssertNumberOfCalls(t, "Sync", 1)
-	mockSSR.AssertNumberOfCalls(t, "Backfill", 1)
-	mockCSR.AssertNumberOfCalls(t, "SetStateSyncingMetrics", 1)
-	mockCSR.AssertNumberOfCalls(t, "SwitchToConsensus", 1)
-}
-
-func ensureStateSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
-	t.Helper()
-	status, ok := msg.Data().(types.EventDataStateSyncStatus)
-
-	require.True(t, ok)
-	require.Equal(t, complete, status.Complete)
-	require.Equal(t, height, status.Height)
-}
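The node tests above repeatedly register the same shutdown hook via `t.Cleanup`. A minimal sketch of that idiom extracted as a shared helper (the helper name is hypothetical; `IsRunning`, `Stop`, and `Wait` come from the `service.Service` values the tests already use):

```go
package node

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"github.com/tendermint/tendermint/libs/service"
)

// stopServiceOnCleanup registers a cleanup that stops a still-running
// service and blocks until it has terminated, so a test cannot leak the
// node's goroutines even when an assertion fails early.
func stopServiceOnCleanup(t *testing.T, ns service.Service) {
	t.Helper()
	t.Cleanup(func() {
		if ns.IsRunning() {
			assert.NoError(t, ns.Stop())
			ns.Wait()
		}
	})
}
```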
diff --git a/node/public.go b/node/public.go
index 99a8226d0..c616eebac 100644
--- a/node/public.go
+++ b/node/public.go
@@ -4,11 +4,11 @@ package node
 import (
 	"fmt"
 
+	abciclient "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 	"github.com/tendermint/tendermint/privval"
-	"github.com/tendermint/tendermint/proxy"
 	"github.com/tendermint/tendermint/types"
 )
 
@@ -28,7 +28,7 @@ func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error)
 // value of the final argument.
 func New(conf *config.Config,
 	logger log.Logger,
-	cf proxy.ClientCreator,
+	cf abciclient.Creator,
 	gen *types.GenesisDoc,
 ) (service.Service, error) {
 	nodeKey, err := types.LoadOrGenNodeKey(conf.NodeKeyFile())
diff --git a/node/setup.go b/node/setup.go
index bbd9536d9..abeb8ce1d 100644
--- a/node/setup.go
+++ b/node/setup.go
@@ -2,59 +2,96 @@ package node
 
 import (
 	"bytes"
-	"context"
 	"errors"
 	"fmt"
-	"math"
-	"net"
-	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
 	"strings"
 	"time"
 
 	dbm "github.com/tendermint/tm-db"
 
-	abci "github.com/tendermint/tendermint/abci/types"
-	cfg "github.com/tendermint/tendermint/config"
+	abciclient "github.com/tendermint/tendermint/abci/client"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/crypto"
-	bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0"
-	bcv2 "github.com/tendermint/tendermint/internal/blocksync/v2"
-	cs "github.com/tendermint/tendermint/internal/consensus"
+	"github.com/tendermint/tendermint/internal/blocksync"
+	"github.com/tendermint/tendermint/internal/consensus"
 	"github.com/tendermint/tendermint/internal/evidence"
 	"github.com/tendermint/tendermint/internal/mempool"
 	mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
 	mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v1"
 	"github.com/tendermint/tendermint/internal/p2p"
+	"github.com/tendermint/tendermint/internal/p2p/conn"
 	"github.com/tendermint/tendermint/internal/p2p/pex"
+	"github.com/tendermint/tendermint/internal/proxy"
+	sm "github.com/tendermint/tendermint/internal/state"
+	"github.com/tendermint/tendermint/internal/state/indexer"
+	"github.com/tendermint/tendermint/internal/state/indexer/sink"
 	"github.com/tendermint/tendermint/internal/statesync"
+	"github.com/tendermint/tendermint/internal/store"
 	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 	tmstrings "github.com/tendermint/tendermint/libs/strings"
-	protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p"
-	"github.com/tendermint/tendermint/proxy"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/state/indexer"
-	kv "github.com/tendermint/tendermint/state/indexer/sink/kv"
-	null "github.com/tendermint/tendermint/state/indexer/sink/null"
-	psql "github.com/tendermint/tendermint/state/indexer/sink/psql"
-	"github.com/tendermint/tendermint/store"
 	"github.com/tendermint/tendermint/types"
 	"github.com/tendermint/tendermint/version"
+
+	_ "net/http/pprof" // nolint: gosec // securely exposed on separate, optional port
 )
 
-func initDBs(config *cfg.Config, dbProvider cfg.DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) {
-	var blockStoreDB dbm.DB
-	blockStoreDB, err = dbProvider(&cfg.DBContext{ID: "blockstore", Config: config})
-	if err != nil {
-		return
-	}
-	blockStore = store.NewBlockStore(blockStoreDB)
+type closer func() error
 
-	stateDB, err = dbProvider(&cfg.DBContext{ID: "state", Config: config})
-	return
+func makeCloser(cs []closer) closer {
+	return func() error {
+		errs := make([]string, 0, len(cs))
+		for _, cl := range cs {
+			if err := cl(); err != nil {
+				errs = append(errs, err.Error())
+			}
+		}
+		if len(errs) > 0 {
+			return errors.New(strings.Join(errs, "; "))
+		}
+		return nil
+	}
 }
 
-func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.Logger) (proxy.AppConns, error) {
-	proxyApp := proxy.NewAppConns(clientCreator)
+func combineCloseError(err error, cl closer) error {
+	if err == nil {
+		return cl()
+	}
+
+	clerr := cl()
+	if clerr == nil {
+		return err
+	}
+
+	return fmt.Errorf("error=%q closerError=%q", err.Error(), clerr.Error())
+}
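For context, a sketch of how these helpers compose: a constructor that opens a database can return through `combineCloseError` so the caller sees both the primary failure and any failure from closing. The function and the `use` callback are illustrative, not part of the patch:

```go
package node

import (
	dbm "github.com/tendermint/tm-db"

	"github.com/tendermint/tendermint/config"
)

// exampleOpen is a hypothetical constructor showing the intended use of the
// closer/makeCloser/combineCloseError helpers defined in this file.
func exampleOpen(cfg *config.Config, dbProvider config.DBProvider, use func(dbm.DB) error) error {
	db, err := dbProvider(&config.DBContext{ID: "example", Config: cfg})
	if err != nil {
		return err
	}
	if err := use(db); err != nil {
		// Report the original error together with any error from closing.
		return combineCloseError(err, db.Close)
	}
	return db.Close()
}
```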
+
+func initDBs(
+	cfg *config.Config,
+	dbProvider config.DBProvider,
+) (*store.BlockStore, dbm.DB, closer, error) {
+
+	blockStoreDB, err := dbProvider(&config.DBContext{ID: "blockstore", Config: cfg})
+	if err != nil {
+		return nil, nil, func() error { return nil }, err
+	}
+	closers := []closer{}
+	blockStore := store.NewBlockStore(blockStoreDB)
+	closers = append(closers, blockStoreDB.Close)
+
+	stateDB, err := dbProvider(&config.DBContext{ID: "state", Config: cfg})
+	if err != nil {
+		return nil, nil, makeCloser(closers), err
+	}
+
+	closers = append(closers, stateDB.Close)
+
+	return blockStore, stateDB, makeCloser(closers), nil
+}
+
+// nolint:lll
+func createAndStartProxyAppConns(clientCreator abciclient.Creator, logger log.Logger, metrics *proxy.Metrics) (proxy.AppConns, error) {
+	proxyApp := proxy.NewAppConns(clientCreator, metrics)
 	proxyApp.SetLogger(logger.With("module", "proxy"))
 	if err := proxyApp.Start(); err != nil {
 		return nil, fmt.Errorf("error starting proxy app connections: %v", err)
@@ -72,62 +109,15 @@ func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) {
 }
 
 func createAndStartIndexerService(
-	config *cfg.Config,
-	dbProvider cfg.DBProvider,
+	cfg *config.Config,
+	dbProvider config.DBProvider,
 	eventBus *types.EventBus,
 	logger log.Logger,
 	chainID string,
 ) (*indexer.Service, []indexer.EventSink, error) {
-
-	eventSinks := []indexer.EventSink{}
-
-	// check for duplicated sinks
-	sinks := map[string]bool{}
-	for _, s := range config.TxIndex.Indexer {
-		sl := strings.ToLower(s)
-		if sinks[sl] {
-			return nil, nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
-		}
-
-		sinks[sl] = true
-	}
-
-loop:
-	for k := range sinks {
-		switch k {
-		case string(indexer.NULL):
-			// When we see null in the config, the eventsinks will be reset with the
-			// nullEventSink.
-			eventSinks = []indexer.EventSink{null.NewEventSink()}
-			break loop
-
-		case string(indexer.KV):
-			store, err := dbProvider(&cfg.DBContext{ID: "tx_index", Config: config})
-			if err != nil {
-				return nil, nil, err
-			}
-
-			eventSinks = append(eventSinks, kv.NewEventSink(store))
-
-		case string(indexer.PSQL):
-			conn := config.TxIndex.PsqlConn
-			if conn == "" {
-				return nil, nil, errors.New("the psql connection settings cannot be empty")
-			}
-
-			es, _, err := psql.NewEventSink(conn, chainID)
-			if err != nil {
-				return nil, nil, err
-			}
-			eventSinks = append(eventSinks, es)
-
-		default:
-			return nil, nil, errors.New("unsupported event sink type")
-		}
-	}
-
-	if len(eventSinks) == 0 {
-		eventSinks = []indexer.EventSink{null.NewEventSink()}
+	eventSinks, err := sink.EventSinksFromConfig(cfg, dbProvider, chainID)
+	if err != nil {
+		return nil, nil, err
 	}
 
 	indexerService := indexer.NewIndexerService(eventSinks, eventBus)
@@ -157,9 +147,9 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger log.Logger,
 		)
 	}
 	switch {
-	case mode == cfg.ModeFull:
+	case mode == config.ModeFull:
 		logger.Info("This node is a fullnode")
-	case mode == cfg.ModeValidator:
+	case mode == config.ModeValidator:
 		addr := pubKey.Address()
 		// Log whether this node is a validator or an observer
 		if state.Validators.HasAddress(addr) {
@@ -180,36 +170,27 @@ func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool {
 }
 
 func createMempoolReactor(
-	config *cfg.Config,
+	cfg *config.Config,
 	proxyApp proxy.AppConns,
 	state sm.State,
 	memplMetrics *mempool.Metrics,
 	peerManager *p2p.PeerManager,
 	router *p2p.Router,
 	logger log.Logger,
-) (*p2p.ReactorShim, service.Service, mempool.Mempool, error) {
+) (service.Service, mempool.Mempool, error) {
 
-	logger = logger.With("module", "mempool", "version", config.Mempool.Version)
-	channelShims := mempoolv0.GetChannelShims(config.Mempool)
-	reactorShim := p2p.NewReactorShim(logger, "MempoolShim", channelShims)
+	logger = logger.With("module", "mempool", "version", cfg.Mempool.Version)
+	peerUpdates := peerManager.Subscribe()
 
-	var (
-		channels    map[p2p.ChannelID]*p2p.Channel
-		peerUpdates *p2p.PeerUpdates
-	)
+	switch cfg.Mempool.Version {
+	case config.MempoolV0:
+		ch, err := router.OpenChannel(mempoolv0.GetChannelDescriptor(cfg.Mempool))
+		if err != nil {
+			return nil, nil, err
+		}
 
-	if config.P2P.DisableLegacy {
-		channels = makeChannelsFromShims(router, channelShims)
-		peerUpdates = peerManager.Subscribe()
-	} else {
-		channels = getChannelsFromShim(reactorShim)
-		peerUpdates = reactorShim.PeerUpdates
-	}
-
-	switch config.Mempool.Version {
-	case cfg.MempoolV0:
 		mp := mempoolv0.NewCListMempool(
-			config.Mempool,
+			cfg.Mempool,
 			proxyApp.Mempool(),
 			state.LastBlockHeight,
 			mempoolv0.WithMetrics(memplMetrics),
@@ -221,23 +202,28 @@ func createMempoolReactor(
 
 		reactor := mempoolv0.NewReactor(
 			logger,
-			config.Mempool,
+			cfg.Mempool,
 			peerManager,
 			mp,
-			channels[mempool.MempoolChannel],
+			ch,
 			peerUpdates,
 		)
 
-		if config.Consensus.WaitForTxs() {
+		if cfg.Consensus.WaitForTxs() {
 			mp.EnableTxsAvailable()
 		}
 
-		return reactorShim, reactor, mp, nil
+		return reactor, mp, nil
+
+	case config.MempoolV1:
+		ch, err := router.OpenChannel(mempoolv1.GetChannelDescriptor(cfg.Mempool))
+		if err != nil {
+			return nil, nil, err
+		}
 
-	case cfg.MempoolV1:
 		mp := mempoolv1.NewTxMempool(
 			logger,
-			config.Mempool,
+			cfg.Mempool,
 			proxyApp.Mempool(),
 			state.LastBlockHeight,
 			mempoolv1.WithMetrics(memplMetrics),
@@ -247,227 +233,182 @@ func createMempoolReactor(
 
 		reactor := mempoolv1.NewReactor(
 			logger,
-			config.Mempool,
+			cfg.Mempool,
 			peerManager,
 			mp,
-			channels[mempool.MempoolChannel],
+			ch,
 			peerUpdates,
 		)
 
-		if config.Consensus.WaitForTxs() {
+		if cfg.Consensus.WaitForTxs() {
 			mp.EnableTxsAvailable()
 		}
 
-		return reactorShim, reactor, mp, nil
+		return reactor, mp, nil
 
 	default:
-		return nil, nil, nil, fmt.Errorf("unknown mempool version: %s", config.Mempool.Version)
+		return nil, nil, fmt.Errorf("unknown mempool version: %s", cfg.Mempool.Version)
 	}
 }
 
 func createEvidenceReactor(
-	config *cfg.Config,
-	dbProvider cfg.DBProvider,
+	cfg *config.Config,
+	dbProvider config.DBProvider,
 	stateDB dbm.DB,
 	blockStore *store.BlockStore,
 	peerManager *p2p.PeerManager,
 	router *p2p.Router,
 	logger log.Logger,
-) (*p2p.ReactorShim, *evidence.Reactor, *evidence.Pool, error) {
-	evidenceDB, err := dbProvider(&cfg.DBContext{ID: "evidence", Config: config})
+) (*evidence.Reactor, *evidence.Pool, error) {
+	evidenceDB, err := dbProvider(&config.DBContext{ID: "evidence", Config: cfg})
 	if err != nil {
-		return nil, nil, nil, err
+		return nil, nil, err
 	}
 
 	logger = logger.With("module", "evidence")
-	reactorShim := p2p.NewReactorShim(logger, "EvidenceShim", evidence.ChannelShims)
 
 	evidencePool, err := evidence.NewPool(logger, evidenceDB, sm.NewStore(stateDB), blockStore)
 	if err != nil {
-		return nil, nil, nil, fmt.Errorf("creating evidence pool: %w", err)
+		return nil, nil, fmt.Errorf("creating evidence pool: %w", err)
 	}
 
-	var (
-		channels    map[p2p.ChannelID]*p2p.Channel
-		peerUpdates *p2p.PeerUpdates
-	)
-
-	if config.P2P.DisableLegacy {
-		channels = makeChannelsFromShims(router, evidence.ChannelShims)
-		peerUpdates = peerManager.Subscribe()
-	} else {
-		channels = getChannelsFromShim(reactorShim)
-		peerUpdates = reactorShim.PeerUpdates
+	ch, err := router.OpenChannel(evidence.GetChannelDescriptor())
+	if err != nil {
+		return nil, nil, fmt.Errorf("creating evidence channel: %w", err)
 	}
 
 	evidenceReactor := evidence.NewReactor(
 		logger,
-		channels[evidence.EvidenceChannel],
-		peerUpdates,
+		ch,
+		peerManager.Subscribe(),
 		evidencePool,
 	)
 
-	return reactorShim, evidenceReactor, evidencePool, nil
+	return evidenceReactor, evidencePool, nil
 }
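Each of the reactor constructors in this hunk now follows the same channel-wiring shape. A condensed sketch using the evidence reactor, with only calls that appear verbatim above (the wrapper function itself is hypothetical):

```go
package node

import (
	"github.com/tendermint/tendermint/internal/evidence"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
)

// wireEvidenceReactor is a hypothetical condensation of createEvidenceReactor,
// showing the pattern shared by the mempool, evidence, and blocksync reactors:
// open the reactor's channel on the router, then hand the channel and a
// peer-update subscription to the reactor constructor.
func wireEvidenceReactor(
	logger log.Logger,
	router *p2p.Router,
	peerManager *p2p.PeerManager,
	pool *evidence.Pool,
) (*evidence.Reactor, error) {
	ch, err := router.OpenChannel(evidence.GetChannelDescriptor())
	if err != nil {
		return nil, err
	}
	return evidence.NewReactor(logger, ch, peerManager.Subscribe(), pool), nil
}
```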
 
 func createBlockchainReactor(
 	logger log.Logger,
-	config *cfg.Config,
 	state sm.State,
 	blockExec *sm.BlockExecutor,
 	blockStore *store.BlockStore,
-	csReactor *cs.Reactor,
+	csReactor *consensus.Reactor,
 	peerManager *p2p.PeerManager,
 	router *p2p.Router,
 	blockSync bool,
-	metrics *cs.Metrics,
-) (*p2p.ReactorShim, service.Service, error) {
+	metrics *consensus.Metrics,
+) (service.Service, error) {
 
 	logger = logger.With("module", "blockchain")
 
-	switch config.BlockSync.Version {
-	case cfg.BlockSyncV0:
-		reactorShim := p2p.NewReactorShim(logger, "BlockchainShim", bcv0.ChannelShims)
-
-		var (
-			channels    map[p2p.ChannelID]*p2p.Channel
-			peerUpdates *p2p.PeerUpdates
-		)
-
-		if config.P2P.DisableLegacy {
-			channels = makeChannelsFromShims(router, bcv0.ChannelShims)
-			peerUpdates = peerManager.Subscribe()
-		} else {
-			channels = getChannelsFromShim(reactorShim)
-			peerUpdates = reactorShim.PeerUpdates
-		}
-
-		reactor, err := bcv0.NewReactor(
-			logger, state.Copy(), blockExec, blockStore, csReactor,
-			channels[bcv0.BlockchainChannel], peerUpdates, blockSync,
-			metrics,
-		)
-		if err != nil {
-			return nil, nil, err
-		}
-
-		return reactorShim, reactor, nil
-
-	case cfg.BlockSyncV2:
-		return nil, nil, errors.New("block sync version v2 is no longer supported. Please use v0")
-
-	default:
-		return nil, nil, fmt.Errorf("unknown block sync version %s", config.BlockSync.Version)
+	ch, err := router.OpenChannel(blocksync.GetChannelDescriptor())
+	if err != nil {
+		return nil, err
 	}
+
+	peerUpdates := peerManager.Subscribe()
+
+	reactor, err := blocksync.NewReactor(
+		logger, state.Copy(), blockExec, blockStore, csReactor,
+		ch, peerUpdates, blockSync,
+		metrics,
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return reactor, nil
 }
 
 func createConsensusReactor(
-	config *cfg.Config,
+	cfg *config.Config,
 	state sm.State,
 	blockExec *sm.BlockExecutor,
 	blockStore sm.BlockStore,
 	mp mempool.Mempool,
 	evidencePool *evidence.Pool,
 	privValidator types.PrivValidator,
-	csMetrics *cs.Metrics,
+	csMetrics *consensus.Metrics,
 	waitSync bool,
 	eventBus *types.EventBus,
 	peerManager *p2p.PeerManager,
 	router *p2p.Router,
 	logger log.Logger,
-) (*p2p.ReactorShim, *cs.Reactor, *cs.State) {
+) (*consensus.Reactor, *consensus.State, error) {
 
-	consensusState := cs.NewState(
-		config.Consensus,
+	consensusState := consensus.NewState(
+		cfg.Consensus,
 		state.Copy(),
 		blockExec,
 		blockStore,
 		mp,
 		evidencePool,
-		cs.StateMetrics(csMetrics),
+		consensus.StateMetrics(csMetrics),
 	)
 	consensusState.SetLogger(logger)
-	if privValidator != nil && config.Mode == cfg.ModeValidator {
+	if privValidator != nil && cfg.Mode == config.ModeValidator {
 		consensusState.SetPrivValidator(privValidator)
 	}
 
-	reactorShim := p2p.NewReactorShim(logger, "ConsensusShim", cs.ChannelShims)
+	csChDesc := consensus.GetChannelDescriptors()
+	channels := make(map[p2p.ChannelID]*p2p.Channel, len(csChDesc))
+	for idx := range csChDesc {
+		chd := csChDesc[idx]
+		ch, err := router.OpenChannel(chd)
+		if err != nil {
+			return nil, nil, err
+		}
 
-	var (
-		channels    map[p2p.ChannelID]*p2p.Channel
-		peerUpdates *p2p.PeerUpdates
-	)
-
-	if config.P2P.DisableLegacy {
-		channels = makeChannelsFromShims(router, cs.ChannelShims)
-		peerUpdates = peerManager.Subscribe()
-	} else {
-		channels = getChannelsFromShim(reactorShim)
-		peerUpdates = reactorShim.PeerUpdates
+		channels[ch.ID] = ch
 	}
 
-	reactor := cs.NewReactor(
+	peerUpdates := peerManager.Subscribe()
+
+	reactor := consensus.NewReactor(
 		logger,
 		consensusState,
-		channels[cs.StateChannel],
-		channels[cs.DataChannel],
-		channels[cs.VoteChannel],
-		channels[cs.VoteSetBitsChannel],
+		channels[consensus.StateChannel],
+		channels[consensus.DataChannel],
+		channels[consensus.VoteChannel],
+		channels[consensus.VoteSetBitsChannel],
 		peerUpdates,
 		waitSync,
-		cs.ReactorMetrics(csMetrics),
+		consensus.ReactorMetrics(csMetrics),
 	)
 
 	// Services which will be publishing and/or subscribing for messages (events)
 	// consensusReactor will set it on consensusState and blockExecutor.
 	reactor.SetEventBus(eventBus)
 
-	return reactorShim, reactor, consensusState
+	return reactor, consensusState, nil
 }
 
-func createTransport(logger log.Logger, config *cfg.Config) *p2p.MConnTransport {
+func createTransport(logger log.Logger, cfg *config.Config) *p2p.MConnTransport {
 	return p2p.NewMConnTransport(
-		logger, p2p.MConnConfig(config.P2P), []*p2p.ChannelDescriptor{},
+		logger, conn.DefaultMConnConfig(), []*p2p.ChannelDescriptor{},
 		p2p.MConnTransportOptions{
-			MaxAcceptedConnections: uint32(config.P2P.MaxNumInboundPeers +
-				len(tmstrings.SplitAndTrimEmpty(config.P2P.UnconditionalPeerIDs, ",", " ")),
-			),
+			MaxAcceptedConnections: uint32(cfg.P2P.MaxConnections),
 		},
 	)
 }
 
 func createPeerManager(
-	config *cfg.Config,
-	dbProvider cfg.DBProvider,
-	p2pLogger log.Logger,
+	cfg *config.Config,
+	dbProvider config.DBProvider,
 	nodeID types.NodeID,
-) (*p2p.PeerManager, error) {
+) (*p2p.PeerManager, closer, error) {
 
 	var maxConns uint16
 
 	switch {
-	case config.P2P.MaxConnections > 0:
-		maxConns = config.P2P.MaxConnections
-
-	case config.P2P.MaxNumInboundPeers > 0 && config.P2P.MaxNumOutboundPeers > 0:
-		x := config.P2P.MaxNumInboundPeers + config.P2P.MaxNumOutboundPeers
-		if x > math.MaxUint16 {
-			return nil, fmt.Errorf(
-				"max inbound peers (%d) + max outbound peers (%d) exceeds maximum (%d)",
-				config.P2P.MaxNumInboundPeers,
-				config.P2P.MaxNumOutboundPeers,
-				math.MaxUint16,
-			)
-		}
-
-		maxConns = uint16(x)
-
+	case cfg.P2P.MaxConnections > 0:
+		maxConns = cfg.P2P.MaxConnections
 	default:
 		maxConns = 64
 	}
 
 	privatePeerIDs := make(map[types.NodeID]struct{})
-	for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") {
+	for _, id := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PrivatePeerIDs, ",", " ") {
 		privatePeerIDs[types.NodeID(id)] = struct{}{}
 	}
@@ -483,41 +424,41 @@ func createPeerManager(
 	}
 
 	peers := []p2p.NodeAddress{}
-	for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ") {
+	for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.PersistentPeers, ",", " ") {
 		address, err := p2p.ParseNodeAddress(p)
 		if err != nil {
-			return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
+			return nil, func() error { return nil }, fmt.Errorf("invalid peer address %q: %w", p, err)
 		}
 
 		peers = append(peers, address)
 		options.PersistentPeers = append(options.PersistentPeers, address.NodeID)
 	}
 
-	for _, p := range tmstrings.SplitAndTrimEmpty(config.P2P.BootstrapPeers, ",", " ") {
+	for _, p := range tmstrings.SplitAndTrimEmpty(cfg.P2P.BootstrapPeers, ",", " ") {
 		address, err := p2p.ParseNodeAddress(p)
 		if err != nil {
-			return nil, fmt.Errorf("invalid peer address %q: %w", p, err)
+			return nil, func() error { return nil }, fmt.Errorf("invalid peer address %q: %w", p, err)
 		}
 		peers = append(peers, address)
 	}
 
-	peerDB, err := dbProvider(&cfg.DBContext{ID: "peerstore", Config: config})
+	peerDB, err := dbProvider(&config.DBContext{ID: "peerstore", Config: cfg})
 	if err != nil {
-		return nil, err
+		return nil, func() error { return nil }, err
 	}
 
 	peerManager, err := p2p.NewPeerManager(nodeID, peerDB, options)
 	if err != nil {
-		return nil, fmt.Errorf("failed to create peer manager: %w", err)
+		return nil, peerDB.Close, fmt.Errorf("failed to create peer manager: %w", err)
 	}
 
 	for _, peer := range peers {
 		if _, err := peerManager.Add(peer); err != nil {
-			return nil, fmt.Errorf("failed to add peer %q: %w", peer, err)
+			return nil, peerDB.Close, fmt.Errorf("failed to add peer %q: %w", peer, err)
		}
 	}
 
-	return peerManager, nil
+	return peerManager, peerDB.Close, nil
 }
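A brief sketch of what the extra `closer` return buys the caller: even when construction fails after the peer store is opened, the database handle can still be released. The caller function here is hypothetical; `combineCloseError` is the helper defined earlier in this file:

```go
package node

import (
	"fmt"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/types"
)

// newPeerManagerOrClose is a hypothetical caller of createPeerManager,
// illustrating why the function returns a closer alongside its error.
func newPeerManagerOrClose(cfg *config.Config, dbProvider config.DBProvider, nodeID types.NodeID) error {
	peerManager, peerCloser, err := createPeerManager(cfg, dbProvider, nodeID)
	if err != nil {
		return combineCloseError(fmt.Errorf("failed to create peer manager: %w", err), peerCloser)
	}
	_ = peerManager // wired into the router by the real caller
	return nil
}
```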
 
 func createRouter(
@@ -541,160 +482,23 @@ func createRouter(
 	)
 }
 
-func createSwitch(
-	config *cfg.Config,
-	transport p2p.Transport,
-	p2pMetrics *p2p.Metrics,
-	mempoolReactor *p2p.ReactorShim,
-	bcReactor p2p.Reactor,
-	stateSyncReactor *p2p.ReactorShim,
-	consensusReactor *p2p.ReactorShim,
-	evidenceReactor *p2p.ReactorShim,
-	proxyApp proxy.AppConns,
-	nodeInfo types.NodeInfo,
-	nodeKey types.NodeKey,
-	p2pLogger log.Logger,
-) *p2p.Switch {
-
-	var (
-		connFilters = []p2p.ConnFilterFunc{}
-		peerFilters = []p2p.PeerFilterFunc{}
-	)
-
-	if !config.P2P.AllowDuplicateIP {
-		connFilters = append(connFilters, p2p.ConnDuplicateIPFilter)
-	}
-
-	// Filter peers by addr or pubkey with an ABCI query.
-	// If the query return code is OK, add peer.
-	if config.FilterPeers {
-		connFilters = append(
-			connFilters,
-			// ABCI query for address filtering.
-			func(_ p2p.ConnSet, c net.Conn, _ []net.IP) error {
-				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
-					Path: fmt.Sprintf("/p2p/filter/addr/%s", c.RemoteAddr().String()),
-				})
-				if err != nil {
-					return err
-				}
-				if res.IsErr() {
-					return fmt.Errorf("error querying abci app: %v", res)
-				}
-
-				return nil
-			},
-		)
-
-		peerFilters = append(
-			peerFilters,
-			// ABCI query for ID filtering.
-			func(_ p2p.IPeerSet, p p2p.Peer) error {
-				res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{
-					Path: fmt.Sprintf("/p2p/filter/id/%s", p.ID()),
-				})
-				if err != nil {
-					return err
-				}
-				if res.IsErr() {
-					return fmt.Errorf("error querying abci app: %v", res)
-				}
-
-				return nil
-			},
-		)
-	}
-
-	sw := p2p.NewSwitch(
-		config.P2P,
-		transport,
-		p2p.WithMetrics(p2pMetrics),
-		p2p.SwitchPeerFilters(peerFilters...),
-		p2p.SwitchConnFilters(connFilters...),
-	)
-
-	sw.SetLogger(p2pLogger)
-	if config.Mode != cfg.ModeSeed {
-		sw.AddReactor("MEMPOOL", mempoolReactor)
-		sw.AddReactor("BLOCKCHAIN", bcReactor)
-		sw.AddReactor("CONSENSUS", consensusReactor)
-		sw.AddReactor("EVIDENCE", evidenceReactor)
-		sw.AddReactor("STATESYNC", stateSyncReactor)
-	}
-
-	sw.SetNodeInfo(nodeInfo)
-	sw.SetNodeKey(nodeKey)
-
-	p2pLogger.Info("P2P Node ID", "ID", nodeKey.ID, "file", config.NodeKeyFile())
-	return sw
-}
-
-func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch,
-	p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) {
-
-	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
-	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
-
-	// Add ourselves to addrbook to prevent dialing ourselves
-	if config.P2P.ExternalAddress != "" {
-		addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ExternalAddress))
-		if err != nil {
-			return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err)
-		}
-		addrBook.AddOurAddress(addr)
-	}
-	if config.P2P.ListenAddress != "" {
-		addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ListenAddress))
-		if err != nil {
-			return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err)
-		}
-		addrBook.AddOurAddress(addr)
-	}
-
-	sw.SetAddrBook(addrBook)
-
-	return addrBook, nil
-}
-
-func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config,
-	sw *p2p.Switch, logger log.Logger) *pex.Reactor {
-
-	reactorConfig := &pex.ReactorConfig{
-		Seeds:    tmstrings.SplitAndTrimEmpty(config.P2P.Seeds, ",", " "),
-		SeedMode: config.Mode == cfg.ModeSeed,
-		// See consensus/reactor.go: blocksToContributeToBecomeGoodPeer 10000
-		// blocks assuming 10s blocks ~ 28 hours.
-		// TODO (melekes): make it dynamic based on the actual block latencies
-		// from the live network.
-		// https://github.com/tendermint/tendermint/issues/3523
-		SeedDisconnectWaitPeriod:     28 * time.Hour,
-		PersistentPeersMaxDialPeriod: config.P2P.PersistentPeersMaxDialPeriod,
-	}
-	// TODO persistent peers ? so we can have their DNS addrs saved
-	pexReactor := pex.NewReactor(addrBook, reactorConfig)
-	pexReactor.SetLogger(logger.With("module", "pex"))
-	sw.AddReactor("PEX", pexReactor)
-	return pexReactor
-}
-
-func createPEXReactorV2(
-	config *cfg.Config,
+func createPEXReactor(
 	logger log.Logger,
 	peerManager *p2p.PeerManager,
 	router *p2p.Router,
-) (*pex.ReactorV2, error) {
+) (service.Service, error) {
 
-	channel, err := router.OpenChannel(pex.ChannelDescriptor(), &protop2p.PexMessage{}, 128)
+	channel, err := router.OpenChannel(pex.ChannelDescriptor())
 	if err != nil {
 		return nil, err
 	}
 
 	peerUpdates := peerManager.Subscribe()
-	return pex.NewReactorV2(logger, peerManager, channel, peerUpdates), nil
+	return pex.NewReactor(logger, peerManager, channel, peerUpdates), nil
 }
 
 func makeNodeInfo(
-	config *cfg.Config,
+	cfg *config.Config,
 	nodeKey types.NodeKey,
 	eventSinks []indexer.EventSink,
 	genDoc *types.GenesisDoc,
@@ -706,17 +510,7 @@ func makeNodeInfo(
 		txIndexerStatus = "on"
 	}
 
-	var bcChannel byte
-	switch config.BlockSync.Version {
-	case cfg.BlockSyncV0:
-		bcChannel = byte(bcv0.BlockchainChannel)
-
-	case cfg.BlockSyncV2:
-		bcChannel = bcv2.BlockchainChannel
-
-	default:
-		return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", config.BlockSync.Version)
-	}
+	bcChannel := byte(blocksync.BlockSyncChannel)
 
 	nodeInfo := types.NodeInfo{
 		ProtocolVersion: types.ProtocolVersion{
@@ -729,31 +523,32 @@ func makeNodeInfo(
 		Version: version.TMVersion,
 		Channels: []byte{
 			bcChannel,
-			byte(cs.StateChannel),
-			byte(cs.DataChannel),
-			byte(cs.VoteChannel),
-			byte(cs.VoteSetBitsChannel),
+			byte(consensus.StateChannel),
+			byte(consensus.DataChannel),
+			byte(consensus.VoteChannel),
+			byte(consensus.VoteSetBitsChannel),
 			byte(mempool.MempoolChannel),
 			byte(evidence.EvidenceChannel),
 			byte(statesync.SnapshotChannel),
 			byte(statesync.ChunkChannel),
 			byte(statesync.LightBlockChannel),
+			byte(statesync.ParamsChannel),
 		},
-		Moniker: config.Moniker,
+		Moniker: cfg.Moniker,
 		Other: types.NodeInfoOther{
 			TxIndex:    txIndexerStatus,
-			RPCAddress: config.RPC.ListenAddress,
+			RPCAddress: cfg.RPC.ListenAddress,
 		},
 	}
 
-	if config.P2P.PexReactor {
+	if cfg.P2P.PexReactor {
 		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
 	}
 
-	lAddr := config.P2P.ExternalAddress
+	lAddr := cfg.P2P.ExternalAddress
 	if lAddr == "" {
-		lAddr = config.P2P.ListenAddress
+		lAddr = cfg.P2P.ListenAddress
 	}
 
 	nodeInfo.ListenAddr = lAddr
@@ -763,7 +558,7 @@ func makeNodeInfo(
 }
 
 func makeSeedNodeInfo(
-	config *cfg.Config,
+	cfg *config.Config,
 	nodeKey types.NodeKey,
 	genDoc *types.GenesisDoc,
 	state sm.State,
@@ -778,21 +573,21 @@ func makeSeedNodeInfo(
 		Network:  genDoc.ChainID,
 		Version:  version.TMVersion,
 		Channels: []byte{},
-		Moniker:  config.Moniker,
+		Moniker:  cfg.Moniker,
 		Other: types.NodeInfoOther{
 			TxIndex:    "off",
-			RPCAddress: config.RPC.ListenAddress,
+			RPCAddress: cfg.RPC.ListenAddress,
 		},
 	}
 
-	if config.P2P.PexReactor {
+	if cfg.P2P.PexReactor {
 		nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel)
 	}
 
-	lAddr := config.P2P.ExternalAddress
+	lAddr := cfg.P2P.ExternalAddress
 	if lAddr == "" {
-		lAddr = config.P2P.ListenAddress
+		lAddr = cfg.P2P.ListenAddress
 	}
 
 	nodeInfo.ListenAddr = lAddr
diff --git a/privval/grpc/client.go b/privval/grpc/client.go
index 77f3930aa..f4c0b7d99 100644
--- a/privval/grpc/client.go
+++ b/privval/grpc/client.go
@@ -7,7 +7,7 @@ import (
 	"google.golang.org/grpc/status"
 
 	"github.com/tendermint/tendermint/crypto"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/libs/log"
 	privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -62,7 +62,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) {
 		return nil, errStatus.Err()
 	}
 
-	pk, err := cryptoenc.PubKeyFromProto(resp.PubKey)
+	pk, err := encoding.PubKeyFromProto(resp.PubKey)
 	if err != nil {
 		return nil, err
 	}
diff --git a/privval/grpc/server.go b/privval/grpc/server.go
index f5c434b1b..13e0c9073 100644
--- a/privval/grpc/server.go
+++ b/privval/grpc/server.go
@@ -7,7 +7,7 @@ import (
 	"google.golang.org/grpc/status"
 
 	"github.com/tendermint/tendermint/crypto"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/libs/log"
 	privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
 	"github.com/tendermint/tendermint/types"
@@ -44,7 +44,7 @@ func (ss *SignerServer) GetPubKey(ctx context.Context, req *privvalproto.PubKeyR
 		return nil, status.Errorf(codes.NotFound, "error getting pubkey: %v", err)
 	}
 
-	pk, err := cryptoenc.PubKeyToProto(pubKey)
+	pk, err := encoding.PubKeyToProto(pubKey)
 	if err != nil {
 		return nil, status.Errorf(codes.Internal, "error transitioning pubkey to proto: %v", err)
 	}
diff --git a/privval/grpc/util.go b/privval/grpc/util.go
index 62647542c..413acca8e 100644
--- a/privval/grpc/util.go
+++ b/privval/grpc/util.go
@@ -11,7 +11,7 @@ import (
 	grpc_retry "github.com/grpc-ecosystem/go-grpc-middleware/retry"
 	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
 
-	cfg "github.com/tendermint/tendermint/config"
+	"github.com/tendermint/tendermint/config"
 	"github.com/tendermint/tendermint/libs/log"
 	tmnet "github.com/tendermint/tendermint/libs/net"
 	grpc "google.golang.org/grpc"
@@ -88,15 +88,15 @@ func GenerateTLS(certPath, keyPath, ca string, log log.Logger) grpc.DialOption {
 
 // DialRemoteSigner is a generalized function to dial the gRPC server.
 func DialRemoteSigner(
-	config *cfg.PrivValidatorConfig,
+	cfg *config.PrivValidatorConfig,
 	chainID string,
 	logger log.Logger,
 	usePrometheus bool,
 ) (*SignerClient, error) {
 	var transportSecurity grpc.DialOption
-	if config.AreSecurityOptionsPresent() {
-		transportSecurity = GenerateTLS(config.ClientCertificateFile(),
-			config.ClientKeyFile(), config.RootCAFile(), logger)
+	if cfg.AreSecurityOptionsPresent() {
+		transportSecurity = GenerateTLS(cfg.ClientCertificateFile(),
+			cfg.ClientKeyFile(), cfg.RootCAFile(), logger)
 	} else {
 		transportSecurity = grpc.WithInsecure()
 		logger.Info("Using an insecure gRPC connection!")
@@ -111,7 +111,7 @@ func DialRemoteSigner(
 	dialOptions = append(dialOptions, transportSecurity)
 
 	ctx := context.Background()
-	_, address := tmnet.ProtocolAndAddress(config.ListenAddr)
+	_, address := tmnet.ProtocolAndAddress(cfg.ListenAddr)
 	conn, err := grpc.DialContext(ctx, address, dialOptions...)
 	if err != nil {
 		logger.Error("unable to connect to server", "target", address, "err", err)
diff --git a/privval/msgs_test.go b/privval/msgs_test.go
index bf532bd7b..7ac9f2c5d 100644
--- a/privval/msgs_test.go
+++ b/privval/msgs_test.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/crypto/tmhash"
 	cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
 	privproto "github.com/tendermint/tendermint/proto/tendermint/privval"
@@ -60,7 +60,7 @@ func exampleProposal() *types.Proposal {
 // nolint:lll // ignore line length for tests
 func TestPrivvalVectors(t *testing.T) {
 	pk := ed25519.GenPrivKeyFromSecret([]byte("it's a secret")).PubKey()
-	ppk, err := cryptoenc.PubKeyToProto(pk)
+	ppk, err := encoding.PubKeyToProto(pk)
 	require.NoError(t, err)
 
 	// Generate a simple vote
diff --git a/privval/secret_connection.go b/privval/secret_connection.go
index 8847f91db..ffa5d36ed 100644
--- a/privval/secret_connection.go
+++ b/privval/secret_connection.go
@@ -23,7 +23,7 @@ import (
 
 	"github.com/tendermint/tendermint/crypto"
 	"github.com/tendermint/tendermint/crypto/ed25519"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/internal/libs/protoio"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/libs/async"
@@ -408,7 +408,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte
 	// Send our info and receive theirs in tandem.
 	var trs, _ = async.Parallel(
 		func(_ int) (val interface{}, abort bool, err error) {
-			pbpk, err := cryptoenc.PubKeyToProto(pubKey)
+			pbpk, err := encoding.PubKeyToProto(pubKey)
 			if err != nil {
 				return nil, true, err
 			}
@@ -425,7 +425,7 @@ func shareAuthSignature(sc io.ReadWriter, pubKey crypto.PubKey, signature []byte
 				return nil, true, err // abort
 			}
 
-			pk, err := cryptoenc.PubKeyFromProto(pba.PubKey)
+			pk, err := encoding.PubKeyFromProto(pba.PubKey)
 			if err != nil {
 				return nil, true, err // abort
 			}
diff --git a/privval/signer_client.go b/privval/signer_client.go
index d25584c8f..5e5b32a92 100644
--- a/privval/signer_client.go
+++ b/privval/signer_client.go
@@ -6,7 +6,7 @@ import (
 	"time"
 
 	"github.com/tendermint/tendermint/crypto"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 	"github.com/tendermint/tendermint/types"
@@ -83,7 +83,7 @@ func (sc *SignerClient) GetPubKey(ctx context.Context) (crypto.PubKey, error) {
 		return nil, &RemoteSignerError{Code: int(resp.Error.Code), Description: resp.Error.Description}
 	}
 
-	pk, err := cryptoenc.PubKeyFromProto(resp.PubKey)
+	pk, err := encoding.PubKeyFromProto(resp.PubKey)
 	if err != nil {
 		return nil, err
 	}
diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go
index 18ad8a996..d07c65620 100644
--- a/privval/signer_requestHandler.go
+++ b/privval/signer_requestHandler.go
@@ -5,7 +5,7 @@ import (
 	"fmt"
 
 	"github.com/tendermint/tendermint/crypto"
-	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
+	"github.com/tendermint/tendermint/crypto/encoding"
 	cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto"
 	privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -37,7 +37,7 @@ func DefaultValidationRequestHandler(
 		if err != nil {
 			return res, err
 		}
-		pk, err := cryptoenc.PubKeyToProto(pubKey)
+		pk, err := encoding.PubKeyToProto(pubKey)
 		if err != nil {
 			return res, err
 		}
diff --git a/proto/tendermint/blocksync/message.go b/proto/tendermint/blocksync/message.go
index d448ccc4b..1840c4e61 100644
--- a/proto/tendermint/blocksync/message.go
+++ b/proto/tendermint/blocksync/message.go
@@ -2,9 +2,9 @@ package blocksync
 
 import (
 	"errors"
-	fmt "fmt"
+	"fmt"
 
-	proto "github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/proto"
 )
 
 const (
diff --git a/proto/tendermint/blocksync/message_test.go b/proto/tendermint/blocksync/message_test.go
index dd1aebbd0..f81de149f 100644
--- a/proto/tendermint/blocksync/message_test.go
+++ b/proto/tendermint/blocksync/message_test.go
@@ -5,7 +5,7 @@ import (
 	math "math"
 	"testing"
 
-	proto "github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/require"
 
 	bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
diff --git a/proto/tendermint/consensus/message.go b/proto/tendermint/consensus/message.go
index 51feffc22..bcdab629a 100644
--- a/proto/tendermint/consensus/message.go
+++ b/proto/tendermint/consensus/message.go
@@ -1,9 +1,9 @@
 package consensus
 
 import (
-	fmt "fmt"
+	"fmt"
 
-	proto "github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/proto"
 )
 
 // Wrap implements the p2p Wrapper interface and wraps a consensus proto message.
diff --git a/proto/tendermint/mempool/message.go b/proto/tendermint/mempool/message.go
index 64a79bc81..a3e249f99 100644
--- a/proto/tendermint/mempool/message.go
+++ b/proto/tendermint/mempool/message.go
@@ -1,9 +1,9 @@
 package mempool
 
 import (
-	fmt "fmt"
+	"fmt"
 
-	proto "github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/proto"
 )
 
 // Wrap implements the p2p Wrapper interface and wraps a mempool message.
diff --git a/proto/tendermint/p2p/pex.go b/proto/tendermint/p2p/pex.go
index 8ba8cd2b2..38c8239dd 100644
--- a/proto/tendermint/p2p/pex.go
+++ b/proto/tendermint/p2p/pex.go
@@ -1,9 +1,9 @@
 package p2p
 
 import (
-	fmt "fmt"
+	"fmt"
 
-	proto "github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/proto"
 )
 
 // Wrap implements the p2p Wrapper interface and wraps a PEX message.
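The `Wrap`/`Unwrap` helpers touched above all follow the same envelope pattern on the wire. A small usage sketch against the statesync envelope, using only the signatures shown in this patch (error handling abbreviated):

```go
package main

import (
	"fmt"

	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
)

func main() {
	// Box a concrete message into the package's oneof envelope...
	var env ssproto.Message
	if err := env.Wrap(&ssproto.ParamsRequest{Height: 1}); err != nil {
		panic(err)
	}
	// ...and unbox it on the receiving side.
	msg, err := env.Unwrap()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", msg) // *statesync.ParamsRequest
}
```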
diff --git a/proto/tendermint/rpc/grpc/types.proto b/proto/tendermint/rpc/grpc/types.proto
deleted file mode 100644
index ee948a406..000000000
--- a/proto/tendermint/rpc/grpc/types.proto
+++ /dev/null
@@ -1,32 +0,0 @@
-syntax = "proto3";
-package tendermint.rpc.grpc;
-option go_package = "github.com/tendermint/tendermint/rpc/grpc;coregrpc";
-
-import "tendermint/abci/types.proto";
-
-//----------------------------------------
-// Request types
-
-message RequestPing {}
-
-message RequestBroadcastTx {
-  bytes tx = 1;
-}
-
-//----------------------------------------
-// Response types
-
-message ResponsePing {}
-
-message ResponseBroadcastTx {
-  tendermint.abci.ResponseCheckTx check_tx = 1;
-  tendermint.abci.ResponseDeliverTx deliver_tx = 2;
-}
-
-//----------------------------------------
-// Service Definition
-
-service BroadcastAPI {
-  rpc Ping(RequestPing) returns (ResponsePing);
-  rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx);
-}
diff --git a/proto/tendermint/statesync/message.go b/proto/tendermint/statesync/message.go
index 6f9b6ad59..92d3764fd 100644
--- a/proto/tendermint/statesync/message.go
+++ b/proto/tendermint/statesync/message.go
@@ -2,9 +2,9 @@ package statesync
 
 import (
 	"errors"
-	fmt "fmt"
+	"fmt"
 
-	proto "github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/proto"
 )
 
 // Wrap implements the p2p Wrapper interface and wraps a state sync proto message.
@@ -28,6 +28,12 @@ func (m *Message) Wrap(pb proto.Message) error {
 	case *LightBlockResponse:
 		m.Sum = &Message_LightBlockResponse{LightBlockResponse: msg}
 
+	case *ParamsRequest:
+		m.Sum = &Message_ParamsRequest{ParamsRequest: msg}
+
+	case *ParamsResponse:
+		m.Sum = &Message_ParamsResponse{ParamsResponse: msg}
+
 	default:
 		return fmt.Errorf("unknown message: %T", msg)
 	}
@@ -57,6 +63,12 @@ func (m *Message) Unwrap() (proto.Message, error) {
 	case *Message_LightBlockResponse:
 		return m.GetLightBlockResponse(), nil
 
+	case *Message_ParamsRequest:
+		return m.GetParamsRequest(), nil
+
+	case *Message_ParamsResponse:
+		return m.GetParamsResponse(), nil
+
 	default:
 		return nil, fmt.Errorf("unknown message: %T", msg)
 	}
@@ -106,6 +118,17 @@ func (m *Message) Validate() error {
 	// light block validation handled by the backfill process
 	case *Message_LightBlockResponse:
 
+	case *Message_ParamsRequest:
+		if m.GetParamsRequest().Height == 0 {
+			return errors.New("height cannot be 0")
+		}
+
+	case *Message_ParamsResponse:
+		resp := m.GetParamsResponse()
+		if resp.Height == 0 {
+			return errors.New("height cannot be 0")
+		}
+
 	default:
 		return fmt.Errorf("unknown message type: %T", msg)
 	}
diff --git a/proto/tendermint/statesync/message_test.go b/proto/tendermint/statesync/message_test.go
index dcf089130..cccd25766 100644
--- a/proto/tendermint/statesync/message_test.go
+++ b/proto/tendermint/statesync/message_test.go
@@ -4,11 +4,12 @@ import (
 	"encoding/hex"
 	"testing"
 
-	proto "github.com/gogo/protobuf/proto"
+	"github.com/gogo/protobuf/proto"
 	"github.com/stretchr/testify/require"
 
 	ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
+	"github.com/tendermint/tendermint/types"
 )
 
 func TestValidateMsg(t *testing.T) {
@@ -161,6 +162,35 @@ func TestStateSyncVectors(t *testing.T) {
 			},
 			"2214080110021803220c697427732061206368756e6b",
 		},
+		{
+			"LightBlockRequest",
+			&ssproto.LightBlockRequest{
+				Height: 100,
+			},
+			"2a020864",
+		},
+		{
+			"LightBlockResponse",
+			&ssproto.LightBlockResponse{
+				LightBlock: nil,
+			},
+			"3200",
+		},
+		{
+			"ParamsRequest",
+			&ssproto.ParamsRequest{
+				Height: 9001,
+			},
+			"3a0308a946",
+		},
+		{
+			"ParamsResponse",
+			&ssproto.ParamsResponse{
+				Height:          9001,
+				ConsensusParams: types.DefaultConsensusParams().ToProto(),
+			},
+			"423408a946122f0a10088080c00a10ffffffffffffffffff01120e08a08d0612040880c60a188080401a090a07656432353531392200",
+		},
 	}
 
 	for _, tc := range testCases {
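The new vectors above pin the exact wire encoding of each message. The assertion loop consuming `testCases` sits outside this hunk; a sketch of what it plausibly does, with the `testName`/`msg`/`expBytes` field names assumed from the literals shown:

```go
for _, tc := range testCases {
	tc := tc // capture range variable
	msg := new(ssproto.Message)
	require.NoError(t, msg.Wrap(tc.msg))

	// Marshal the envelope and compare against the pinned hex string,
	// guarding the wire format against accidental changes.
	bz, err := proto.Marshal(msg)
	require.NoError(t, err)
	require.Equal(t, tc.expBytes, hex.EncodeToString(bz), tc.testName)
}
```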
func (*Message) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -157,6 +182,8 @@ func (*Message) XXX_OneofWrappers() []interface{} { (*Message_ChunkResponse)(nil), (*Message_LightBlockRequest)(nil), (*Message_LightBlockResponse)(nil), + (*Message_ParamsRequest)(nil), + (*Message_ParamsResponse)(nil), } } @@ -496,6 +523,102 @@ func (m *LightBlockResponse) GetLightBlock() *types.LightBlock { return nil } +type ParamsRequest struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` +} + +func (m *ParamsRequest) Reset() { *m = ParamsRequest{} } +func (m *ParamsRequest) String() string { return proto.CompactTextString(m) } +func (*ParamsRequest) ProtoMessage() {} +func (*ParamsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{7} +} +func (m *ParamsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ParamsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ParamsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamsRequest.Merge(m, src) +} +func (m *ParamsRequest) XXX_Size() int { + return m.Size() +} +func (m *ParamsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ParamsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamsRequest proto.InternalMessageInfo + +func (m *ParamsRequest) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +type ParamsResponse struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + ConsensusParams types.ConsensusParams `protobuf:"bytes,2,opt,name=consensus_params,json=consensusParams,proto3" json:"consensus_params"` +} + +func (m *ParamsResponse) Reset() { *m = ParamsResponse{} } +func (m *ParamsResponse) String() string { return proto.CompactTextString(m) } +func (*ParamsResponse) ProtoMessage() {} +func (*ParamsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_a1c2869546ca7914, []int{8} +} +func (m *ParamsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ParamsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ParamsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ParamsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParamsResponse.Merge(m, src) +} +func (m *ParamsResponse) XXX_Size() int { + return m.Size() +} +func (m *ParamsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ParamsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ParamsResponse proto.InternalMessageInfo + +func (m *ParamsResponse) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *ParamsResponse) GetConsensusParams() types.ConsensusParams { + if m != nil { + return m.ConsensusParams + } + return types.ConsensusParams{} +} + func init() { proto.RegisterType((*Message)(nil), "tendermint.statesync.Message") proto.RegisterType((*SnapshotsRequest)(nil), "tendermint.statesync.SnapshotsRequest") @@ -504,43 +627,51 @@ func init() { proto.RegisterType((*ChunkResponse)(nil), "tendermint.statesync.ChunkResponse") proto.RegisterType((*LightBlockRequest)(nil), 
"tendermint.statesync.LightBlockRequest") proto.RegisterType((*LightBlockResponse)(nil), "tendermint.statesync.LightBlockResponse") + proto.RegisterType((*ParamsRequest)(nil), "tendermint.statesync.ParamsRequest") + proto.RegisterType((*ParamsResponse)(nil), "tendermint.statesync.ParamsResponse") } func init() { proto.RegisterFile("tendermint/statesync/types.proto", fileDescriptor_a1c2869546ca7914) } var fileDescriptor_a1c2869546ca7914 = []byte{ - // 485 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x54, 0x51, 0x6b, 0xd3, 0x50, - 0x14, 0x4e, 0x5c, 0xdb, 0x8d, 0xb3, 0x46, 0x96, 0x63, 0x91, 0x32, 0x46, 0x18, 0x11, 0x74, 0x20, - 0xa4, 0xa0, 0x8f, 0xe2, 0x4b, 0x7d, 0x99, 0x30, 0x5f, 0xee, 0x1c, 0xa8, 0x08, 0x23, 0x4d, 0xaf, - 0x4d, 0xb0, 0x49, 0x6a, 0xcf, 0x2d, 0xb8, 0x1f, 0xe0, 0x93, 0x2f, 0x82, 0x7f, 0xca, 0xc7, 0x3d, - 0xfa, 0x28, 0xed, 0x1f, 0x91, 0x9c, 0xdc, 0x26, 0x77, 0x6d, 0x5d, 0x11, 0xf6, 0x96, 0xef, 0xeb, - 0x77, 0x3e, 0xbe, 0x73, 0xcf, 0xe9, 0x81, 0x63, 0x25, 0xb3, 0xa1, 0x9c, 0xa6, 0x49, 0xa6, 0x7a, - 0xa4, 0x42, 0x25, 0xe9, 0x2a, 0x8b, 0x7a, 0xea, 0x6a, 0x22, 0x29, 0x98, 0x4c, 0x73, 0x95, 0x63, - 0xa7, 0x56, 0x04, 0x95, 0xe2, 0xf0, 0xc8, 0xa8, 0x63, 0xb5, 0x59, 0xe3, 0xff, 0x6c, 0xc0, 0xee, - 0x1b, 0x49, 0x14, 0x8e, 0x24, 0x5e, 0x80, 0x4b, 0x59, 0x38, 0xa1, 0x38, 0x57, 0x74, 0x39, 0x95, - 0x5f, 0x66, 0x92, 0x54, 0xd7, 0x3e, 0xb6, 0x4f, 0xf6, 0x9f, 0x3d, 0x0e, 0x36, 0x79, 0x07, 0xe7, - 0x4b, 0xb9, 0x28, 0xd5, 0xa7, 0x96, 0x38, 0xa0, 0x15, 0x0e, 0xdf, 0x01, 0x9a, 0xb6, 0x34, 0xc9, - 0x33, 0x92, 0xdd, 0x7b, 0xec, 0xfb, 0x64, 0xab, 0x6f, 0x29, 0x3f, 0xb5, 0x84, 0x4b, 0xab, 0x24, - 0xbe, 0x06, 0x27, 0x8a, 0x67, 0xd9, 0xe7, 0x2a, 0xec, 0x0e, 0x9b, 0xfa, 0x9b, 0x4d, 0x5f, 0x15, - 0xd2, 0x3a, 0x68, 0x3b, 0x32, 0x30, 0x9e, 0xc1, 0xfd, 0xa5, 0x95, 0x0e, 0xd8, 0x60, 0xaf, 0x47, - 0xb7, 0x7a, 0x55, 0xe1, 0x9c, 0xc8, 0x24, 0xf0, 0x3d, 0x3c, 0x18, 0x27, 0xa3, 0x58, 0x5d, 0x0e, - 0xc6, 0x79, 0x54, 0xc7, 0x6b, 0xde, 0xd6, 0xf3, 0x59, 0x51, 0xd0, 0x2f, 0xf4, 0x75, 0x46, 0x77, - 0xbc, 0x4a, 0xe2, 0x47, 0xe8, 0xdc, 0xb4, 0xd6, 0x71, 0x5b, 0xec, 0x7d, 0xb2, 0xdd, 0xbb, 0xca, - 0x8c, 0xe3, 0x35, 0xb6, 0xdf, 0x84, 0x1d, 0x9a, 0xa5, 0x3e, 0xc2, 0xc1, 0xea, 0x68, 0xfd, 0xef, - 0x36, 0xb8, 0x6b, 0x73, 0xc1, 0x87, 0xd0, 0x8a, 0x65, 0xe1, 0xc3, 0x8b, 0xd2, 0x10, 0x1a, 0x15, - 0xfc, 0xa7, 0x7c, 0x9a, 0x86, 0x8a, 0x07, 0xed, 0x08, 0x8d, 0x0a, 0x9e, 0x9f, 0x8a, 0x78, 0x56, - 0x8e, 0xd0, 0x08, 0x11, 0x1a, 0x71, 0x48, 0x31, 0xbf, 0x7a, 0x5b, 0xf0, 0x37, 0x1e, 0xc2, 0x5e, - 0x2a, 0x55, 0x38, 0x0c, 0x55, 0xc8, 0x4f, 0xd7, 0x16, 0x15, 0xf6, 0xdf, 0x42, 0xdb, 0x9c, 0xe7, - 0x7f, 0xe7, 0xe8, 0x40, 0x33, 0xc9, 0x86, 0xf2, 0xab, 0x8e, 0x51, 0x02, 0xff, 0x9b, 0x0d, 0xce, - 0x8d, 0xd1, 0xde, 0x8d, 0x6f, 0xc1, 0x72, 0x9f, 0xba, 0xbd, 0x12, 0x60, 0x17, 0x76, 0xd3, 0x84, - 0x28, 0xc9, 0x46, 0xdc, 0xde, 0x9e, 0x58, 0x42, 0xff, 0x29, 0xb8, 0x6b, 0xeb, 0xf0, 0xaf, 0x28, - 0xfe, 0x39, 0xe0, 0xfa, 0x7c, 0xf1, 0x25, 0xec, 0x1b, 0x7b, 0xa2, 0xff, 0xc6, 0x47, 0xe6, 0x7a, - 0x94, 0x67, 0xc0, 0x28, 0x85, 0x7a, 0x21, 0xfa, 0x17, 0xbf, 0xe6, 0x9e, 0x7d, 0x3d, 0xf7, 0xec, - 0x3f, 0x73, 0xcf, 0xfe, 0xb1, 0xf0, 0xac, 0xeb, 0x85, 0x67, 0xfd, 0x5e, 0x78, 0xd6, 0x87, 0x17, - 0xa3, 0x44, 0xc5, 0xb3, 0x41, 0x10, 0xe5, 0x69, 0xcf, 0x3c, 0x2d, 0xf5, 0x27, 0x5f, 0x96, 0xde, - 0xa6, 0x73, 0x35, 0x68, 0xf1, 0x6f, 0xcf, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0xc1, 0x45, 0x35, - 0xee, 0xcd, 0x04, 0x00, 0x00, + // 589 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x13, 0xb7, 0xdd, 0x96, 0x77, 0x9b, 0x6e, 0x3b, 0x16, 0x29, 0x65, 0x8d, 0x6b, 0x14, + 0x77, 0x41, 0x68, 0x41, 0x8f, 0xe2, 0xa5, 0x7b, 0x59, 0x61, 0x45, 0x99, 0x75, 0x41, 0x45, 0x28, + 0x69, 0x3a, 0x26, 0xc1, 0xe6, 0x8f, 0x7d, 0xa7, 0xe0, 0x82, 0x57, 0x4f, 0x5e, 0xfc, 0x2c, 0x7e, + 0x8a, 0x3d, 0xee, 0xd1, 0x93, 0x48, 0xfb, 0x45, 0x24, 0x93, 0x69, 0x32, 0x6d, 0xda, 0x2e, 0x82, + 0xb7, 0xbc, 0xcf, 0x3c, 0xf9, 0xf5, 0x99, 0xc9, 0xc3, 0x14, 0x0e, 0x39, 0x0b, 0x47, 0x6c, 0x12, + 0xf8, 0x21, 0xef, 0x21, 0xb7, 0x39, 0xc3, 0xcb, 0xd0, 0xe9, 0xf1, 0xcb, 0x98, 0x61, 0x37, 0x9e, + 0x44, 0x3c, 0x22, 0xad, 0xdc, 0xd1, 0xcd, 0x1c, 0x9d, 0x96, 0x1b, 0xb9, 0x91, 0x30, 0xf4, 0x92, + 0xa7, 0xd4, 0xdb, 0x39, 0x50, 0x68, 0x82, 0xa1, 0x92, 0x3a, 0x77, 0x0b, 0xab, 0xb1, 0x3d, 0xb1, + 0x03, 0xb9, 0x6c, 0xfd, 0x2c, 0x43, 0xe5, 0x25, 0x43, 0xb4, 0x5d, 0x46, 0x2e, 0xa0, 0x89, 0xa1, + 0x1d, 0xa3, 0x17, 0x71, 0x1c, 0x4c, 0xd8, 0xe7, 0x29, 0x43, 0xde, 0xd6, 0x0f, 0xf5, 0xe3, 0xbd, + 0x27, 0x8f, 0xba, 0xeb, 0x02, 0x75, 0xcf, 0x17, 0x76, 0x9a, 0xba, 0x4f, 0x35, 0xda, 0xc0, 0x15, + 0x8d, 0xbc, 0x05, 0xa2, 0x62, 0x31, 0x8e, 0x42, 0x64, 0xed, 0x5b, 0x82, 0x7b, 0x74, 0x23, 0x37, + 0xb5, 0x9f, 0x6a, 0xb4, 0x89, 0xab, 0x22, 0x79, 0x01, 0x86, 0xe3, 0x4d, 0xc3, 0x4f, 0x59, 0xd8, + 0x1d, 0x01, 0xb5, 0xd6, 0x43, 0x4f, 0x12, 0x6b, 0x1e, 0xb4, 0xe6, 0x28, 0x33, 0x39, 0x83, 0xfa, + 0x02, 0x25, 0x03, 0x96, 0x04, 0xeb, 0xc1, 0x56, 0x56, 0x16, 0xce, 0x70, 0x54, 0x81, 0xbc, 0x83, + 0xdb, 0x63, 0xdf, 0xf5, 0xf8, 0x60, 0x38, 0x8e, 0x9c, 0x3c, 0x5e, 0x79, 0xdb, 0x9e, 0xcf, 0x92, + 0x17, 0xfa, 0x89, 0x3f, 0xcf, 0xd8, 0x1c, 0xaf, 0x8a, 0xe4, 0x03, 0xb4, 0x96, 0xd1, 0x32, 0xee, + 0xae, 0x60, 0x1f, 0xdf, 0xcc, 0xce, 0x32, 0x93, 0x71, 0x41, 0x4d, 0x8e, 0x21, 0xad, 0x47, 0x96, + 0xb9, 0xb2, 0xed, 0x18, 0x5e, 0x0b, 0x6f, 0x9e, 0xd7, 0x88, 0x55, 0x81, 0xbc, 0x82, 0xfd, 0x8c, + 0x26, 0x63, 0x56, 0x05, 0xee, 0xe1, 0x76, 0x5c, 0x16, 0xb1, 0x1e, 0x2f, 0x29, 0xfd, 0x32, 0xec, + 0xe0, 0x34, 0xb0, 0x08, 0x34, 0x56, 0x9b, 0x67, 0x7d, 0xd7, 0xa1, 0x59, 0xa8, 0x0d, 0xb9, 0x03, + 0xbb, 0x1e, 0x4b, 0xb6, 0x29, 0x7a, 0x5c, 0xa2, 0x72, 0x4a, 0xf4, 0x8f, 0xd1, 0x24, 0xb0, 0xb9, + 0xe8, 0xa1, 0x41, 0xe5, 0x94, 0xe8, 0xe2, 0x4b, 0xa2, 0xa8, 0x92, 0x41, 0xe5, 0x44, 0x08, 0x94, + 0x3c, 0x1b, 0x3d, 0x51, 0x8a, 0x1a, 0x15, 0xcf, 0xa4, 0x03, 0xd5, 0x80, 0x71, 0x7b, 0x64, 0x73, + 0x5b, 0x7c, 0xd9, 0x1a, 0xcd, 0x66, 0xeb, 0x0d, 0xd4, 0xd4, 0xba, 0xfd, 0x73, 0x8e, 0x16, 0x94, + 0xfd, 0x70, 0xc4, 0xbe, 0xc8, 0x18, 0xe9, 0x60, 0x7d, 0xd3, 0xc1, 0x58, 0x6a, 0xde, 0xff, 0xe1, + 0x26, 0xaa, 0xd8, 0xa7, 0xdc, 0x5e, 0x3a, 0x90, 0x36, 0x54, 0x02, 0x1f, 0xd1, 0x0f, 0x5d, 0xb1, + 0xbd, 0x2a, 0x5d, 0x8c, 0xd6, 0x63, 0x68, 0x16, 0xda, 0xba, 0x29, 0x8a, 0x75, 0x0e, 0xa4, 0x58, + 0x3f, 0xf2, 0x1c, 0xf6, 0x94, 0x1a, 0xcb, 0x5b, 0xe6, 0x40, 0xad, 0x45, 0x7a, 0x89, 0x29, 0xaf, + 0x42, 0xde, 0x57, 0xeb, 0x08, 0x8c, 0xa5, 0xee, 0x6d, 0xfc, 0xf5, 0xaf, 0x50, 0x5f, 0x6e, 0xd5, + 0xc6, 0x23, 0xa3, 0xd0, 0x70, 0x12, 0x43, 0x88, 0x53, 0x1c, 0xa4, 0xbd, 0x93, 0x97, 0xd4, 0xfd, + 0x62, 0xac, 0x93, 0x85, 0x33, 0x85, 0xf7, 0x4b, 0x57, 0xbf, 0xef, 0x69, 0x74, 0xdf, 0x59, 0x91, + 0x2f, 0xae, 0x66, 0xa6, 0x7e, 0x3d, 0x33, 0xf5, 0x3f, 0x33, 0x53, 0xff, 0x31, 0x37, 0xb5, 0xeb, + 0xb9, 0xa9, 0xfd, 0x9a, 0x9b, 0xda, 0xfb, 0x67, 0xae, 0xcf, 0xbd, 0xe9, 0xb0, 0xeb, 0x44, 0x41, + 0x4f, 0xbd, 0xa1, 0xf3, 0xc7, 0xf4, 0x9e, 0x5f, 0xf7, 0x4f, 0x31, 0xdc, 0x15, 0x6b, 0x4f, 0xff, + 0x06, 0x00, 0x00, 0xff, 0xff, 0xa1, 0xb2, 0xfd, 0x65, 
0x48, 0x06, 0x00, 0x00, } func (m *Message) Marshal() (dAtA []byte, err error) { @@ -701,6 +832,48 @@ func (m *Message_LightBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, err } return len(dAtA) - i, nil } +func (m *Message_ParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_ParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ParamsRequest != nil { + { + size, err := m.ParamsRequest.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} +func (m *Message_ParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Message_ParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ParamsResponse != nil { + { + size, err := m.ParamsResponse.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} func (m *SnapshotsRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -932,6 +1105,72 @@ func (m *LightBlockResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ParamsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ParamsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ParamsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ParamsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ConsensusParams.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -1027,6 +1266,30 @@ func (m *Message_LightBlockResponse) Size() (n int) { } return n } +func (m *Message_ParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamsRequest != nil { + l = m.ParamsRequest.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Message_ParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ParamsResponse != nil { + l = m.ParamsResponse.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} func (m *SnapshotsRequest) Size() (n int) { if m == nil { return 0 @@ -1130,6 
+1393,32 @@ func (m *LightBlockResponse) Size() (n int) { return n } +func (m *ParamsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + return n +} + +func (m *ParamsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + l = m.ConsensusParams.Size() + n += 1 + l + sovTypes(uint64(l)) + return n +} + func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -1375,6 +1664,76 @@ func (m *Message) Unmarshal(dAtA []byte) error { } m.Sum = &Message_LightBlockResponse{v} iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamsRequest", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ParamsRequest{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_ParamsRequest{v} + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ParamsResponse", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ParamsResponse{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Sum = &Message_ParamsResponse{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -2044,6 +2403,177 @@ func (m *LightBlockResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *ParamsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l 
{ + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ParamsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ParamsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ParamsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConsensusParams", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ConsensusParams.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/proto/tendermint/statesync/types.proto b/proto/tendermint/statesync/types.proto index a4dd8e693..fcfd05f68 100644 --- a/proto/tendermint/statesync/types.proto +++ b/proto/tendermint/statesync/types.proto @@ -1,7 +1,9 @@ syntax = "proto3"; package tendermint.statesync; +import "gogoproto/gogo.proto"; import "tendermint/types/types.proto"; +import "tendermint/types/params.proto"; option go_package = "github.com/tendermint/tendermint/proto/tendermint/statesync"; @@ -13,6 +15,8 @@ message Message { ChunkResponse chunk_response = 4; LightBlockRequest light_block_request = 5; LightBlockResponse light_block_response = 6; + ParamsRequest params_request = 7; + ParamsResponse params_response = 8; } } @@ -46,4 +50,13 @@ message LightBlockRequest { message LightBlockResponse { tendermint.types.LightBlock light_block = 1; +} + +message ParamsRequest { + uint64 height = 1; +} + +message ParamsResponse { + uint64 height = 1; + tendermint.types.ConsensusParams consensus_params = 2 [(gogoproto.nullable) = false]; } \ No newline at end of file diff --git a/proxy/client.go b/proxy/client.go deleted file mode 100644 index 929933e01..000000000 --- a/proxy/client.go +++ /dev/null @@ -1,94 +0,0 @@ -package proxy - -import ( - "fmt" - "io" - - abcicli 
"github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/abci/types" - tmsync "github.com/tendermint/tendermint/internal/libs/sync" -) - -//go:generate ../scripts/mockery_generate.sh ClientCreator - -// ClientCreator creates new ABCI clients. -type ClientCreator interface { - // NewABCIClient returns a new ABCI client. - NewABCIClient() (abcicli.Client, error) -} - -//---------------------------------------------------- -// local proxy uses a mutex on an in-proc app - -type localClientCreator struct { - mtx *tmsync.RWMutex - app types.Application -} - -// NewLocalClientCreator returns a ClientCreator for the given app, -// which will be running locally. -func NewLocalClientCreator(app types.Application) ClientCreator { - return &localClientCreator{ - mtx: new(tmsync.RWMutex), - app: app, - } -} - -func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) { - return abcicli.NewLocalClient(l.mtx, l.app), nil -} - -//--------------------------------------------------------------- -// remote proxy opens new connections to an external app process - -type remoteClientCreator struct { - addr string - transport string - mustConnect bool -} - -// NewRemoteClientCreator returns a ClientCreator for the given address (e.g. -// "192.168.0.1") and transport (e.g. "tcp"). Set mustConnect to true if you -// want the client to connect before reporting success. -func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCreator { - return &remoteClientCreator{ - addr: addr, - transport: transport, - mustConnect: mustConnect, - } -} - -func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) { - remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect) - if err != nil { - return nil, fmt.Errorf("failed to connect to proxy: %w", err) - } - - return remoteApp, nil -} - -// DefaultClientCreator returns a default ClientCreator, which will create a -// local client if addr is one of: 'kvstore', -// 'persistent_kvstore' or 'noop', otherwise - a remote client. -// -// The Closer is a noop except for persistent_kvstore applications, -// which will clean up the store. -func DefaultClientCreator(addr, transport, dbDir string) (ClientCreator, io.Closer) { - switch addr { - case "kvstore": - return NewLocalClientCreator(kvstore.NewApplication()), noopCloser{} - case "persistent_kvstore": - app := kvstore.NewPersistentKVStoreApplication(dbDir) - return NewLocalClientCreator(app), app - case "noop": - return NewLocalClientCreator(types.NewBaseApplication()), noopCloser{} - default: - mustConnect := false // loop retrying - return NewRemoteClientCreator(addr, transport, mustConnect), noopCloser{} - } -} - -type noopCloser struct{} - -func (noopCloser) Close() error { return nil } diff --git a/proxy/mocks/client_creator.go b/proxy/mocks/client_creator.go deleted file mode 100644 index 0e4157c2f..000000000 --- a/proxy/mocks/client_creator.go +++ /dev/null @@ -1,36 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. 
- -package mocks - -import ( - mock "github.com/stretchr/testify/mock" - abcicli "github.com/tendermint/tendermint/abci/client" -) - -// ClientCreator is an autogenerated mock type for the ClientCreator type -type ClientCreator struct { - mock.Mock -} - -// NewABCIClient provides a mock function with given fields: -func (_m *ClientCreator) NewABCIClient() (abcicli.Client, error) { - ret := _m.Called() - - var r0 abcicli.Client - if rf, ok := ret.Get(0).(func() abcicli.Client); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(abcicli.Client) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func() error); ok { - r1 = rf() - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} diff --git a/release_notes.md b/release_notes.md deleted file mode 100644 index a537871c5..000000000 --- a/release_notes.md +++ /dev/null @@ -1 +0,0 @@ - diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 4acd0fee9..3b91de107 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -3,7 +3,6 @@ package client_test import ( "context" "fmt" - "reflect" "testing" "time" @@ -13,11 +12,11 @@ import ( abci "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) -var waitForEventTimeout = 8 * time.Second +const waitForEventTimeout = 2 * time.Second // MakeTxKV returns a text transaction, along with expected key, value pair func MakeTxKV() ([]byte, []byte, []byte) { @@ -26,164 +25,41 @@ func MakeTxKV() ([]byte, []byte, []byte) { return k, v, append(k, append([]byte("="), v...)...) } -func TestHeaderEvents(t *testing.T) { - n, conf := NodeSuite(t) +func testTxEventsSent(ctx context.Context, t *testing.T, broadcastMethod string, c client.Client) { + // make the tx + _, _, tx := MakeTxKV() - for i, c := range GetClients(t, n, conf) { - i, c := i, c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err, "%d: %+v", i, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } + // send + done := make(chan struct{}) + go func() { + defer close(done) + var ( + txres *coretypes.ResultBroadcastTx + err error + ) + switch broadcastMethod { + case "async": + txres, err = c.BroadcastTxAsync(ctx, tx) + case "sync": + txres, err = c.BroadcastTxSync(ctx, tx) + default: + panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) + } + if assert.NoError(t, err) { + assert.Equal(t, txres.Code, abci.CodeTypeOK) + } + }() - evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) - require.Nil(t, err, "%d: %+v", i, err) - _, ok := evt.(types.EventDataNewBlockHeader) - require.True(t, ok, "%d: %#v", i, evt) - // TODO: more checks... - }) - } -} - -// subscribe to new blocks and make sure height increments by 1 -func TestBlockEvents(t *testing.T) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. 
- err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - const subscriber = "TestBlockEvents" - - eventCh, err := c.Subscribe(context.Background(), subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) - require.NoError(t, err) - t.Cleanup(func() { - if err := c.UnsubscribeAll(context.Background(), subscriber); err != nil { - t.Error(err) - } - }) - - var firstBlockHeight int64 - for i := int64(0); i < 3; i++ { - event := <-eventCh - blockEvent, ok := event.Data.(types.EventDataNewBlock) - require.True(t, ok) - - block := blockEvent.Block - - if firstBlockHeight == 0 { - firstBlockHeight = block.Header.Height - } - - require.Equal(t, firstBlockHeight+i, block.Header.Height) - } - }) - } -} - -func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) { testTxEventsSent(t, "async") } -func TestTxEventsSentWithBroadcastTxSync(t *testing.T) { testTxEventsSent(t, "sync") } - -func testTxEventsSent(t *testing.T, broadcastMethod string) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - c := c - t.Run(reflect.TypeOf(c).String(), func(t *testing.T) { - - // start for this test it if it wasn't already running - if !c.IsRunning() { - // if so, then we start it, listen, and stop it. - err := c.Start() - require.Nil(t, err) - t.Cleanup(func() { - if err := c.Stop(); err != nil { - t.Error(err) - } - }) - } - - // make the tx - _, _, tx := MakeTxKV() - - // send - go func() { - var ( - txres *ctypes.ResultBroadcastTx - err error - ctx = context.Background() - ) - switch broadcastMethod { - case "async": - txres, err = c.BroadcastTxAsync(ctx, tx) - case "sync": - txres, err = c.BroadcastTxSync(ctx, tx) - default: - panic(fmt.Sprintf("Unknown broadcastMethod %s", broadcastMethod)) - } - if assert.NoError(t, err) { - assert.Equal(t, txres.Code, abci.CodeTypeOK) - } - }() - - // and wait for confirmation - evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) - require.Nil(t, err) - - // and make sure it has the proper info - txe, ok := evt.(types.EventDataTx) - require.True(t, ok) - - // make sure this is the proper tx - require.EqualValues(t, tx, txe.Tx) - require.True(t, txe.Result.IsOK()) - }) - } -} - -// Test HTTPClient resubscribes upon disconnect && subscription error. -// Test Local client resubscribes upon subscription error. 
-func TestClientsResubscribe(t *testing.T) { - // TODO(melekes) -} - -func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - - c := getHTTPClient(t, conf) - - // on Subscribe - _, err := c.Subscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on Unsubscribe - err = c.Unsubscribe(ctx, "TestHeaderEvents", - types.QueryForEvent(types.EventNewBlockHeaderValue).String()) - assert.Error(t, err) - - // on UnsubscribeAll - err = c.UnsubscribeAll(ctx, "TestHeaderEvents") - assert.Error(t, err) + // and wait for confirmation + evt, err := client.WaitForOneEvent(c, types.EventTxValue, waitForEventTimeout) + require.Nil(t, err) + + // and make sure it has the proper info + txe, ok := evt.(types.EventDataTx) + require.True(t, ok) + + // make sure this is the proper tx + require.EqualValues(t, tx, txe.Tx) + require.True(t, txe.Result.IsOK()) + <-done } diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index 0ff158e56..ae4e29f52 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -1,17 +1,12 @@ package client_test import ( - "bytes" "context" "testing" "time" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/privval" @@ -29,7 +24,7 @@ var defaultTestTime = time.Date(2018, 10, 10, 8, 20, 13, 695936996, time.UTC) func newEvidence(t *testing.T, val *privval.FilePV, vote *types.Vote, vote2 *types.Vote, chainID string) *types.DuplicateVoteEvidence { - + t.Helper() var err error v := vote.ToProto() @@ -44,7 +39,9 @@ func newEvidence(t *testing.T, val *privval.FilePV, validator := types.NewValidator(val.Key.PubKey, 10) valSet := types.NewValidatorSet([]*types.Validator{validator}) - return types.NewDuplicateVoteEvidence(vote, vote2, defaultTestTime, valSet) + ev, err := types.NewDuplicateVoteEvidence(vote, vote2, defaultTestTime, valSet) + require.NoError(t, err) + return ev } func makeEvidences( @@ -111,64 +108,6 @@ func makeEvidences( return correct, fakes } -func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, config := NodeSuite(t) - chainID := config.ChainID() - - pv, err := privval.LoadOrGenFilePV(config.PrivValidator.KeyFile(), config.PrivValidator.StateFile()) - require.NoError(t, err) - - for i, c := range GetClients(t, n, config) { - correct, fakes := makeEvidences(t, pv, chainID) - t.Logf("client %d", i) - - // make sure that the node has produced enough blocks - waitForBlock(ctx, t, c, 2) - - result, err := c.BroadcastEvidence(ctx, correct) - require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) - assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash") - - status, err := c.Status(ctx) - require.NoError(t, err) - err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) - require.NoError(t, err) - - ed25519pub := pv.Key.PubKey.(ed25519.PubKey) - rawpub := ed25519pub.Bytes() - result2, err := c.ABCIQuery(ctx, "/val", rawpub) - require.NoError(t, err) - qres := result2.Response - require.True(t, 
qres.IsOK()) - - var v abci.ValidatorUpdate - err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) - require.NoError(t, err, "Error reading query result, value %v", qres.Value) - - pk, err := cryptoenc.PubKeyFromProto(v.PubKey) - require.NoError(t, err) - - require.EqualValues(t, rawpub, pk, "Stored PubKey not equal with expected, value %v", string(qres.Value)) - require.Equal(t, int64(9), v.Power, "Stored Power not equal with expected, value %v", string(qres.Value)) - - for _, fake := range fakes { - _, err := c.BroadcastEvidence(ctx, fake) - require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake) - } - } -} - -func TestBroadcastEmptyEvidence(t *testing.T) { - n, conf := NodeSuite(t) - for _, c := range GetClients(t, n, conf) { - _, err := c.BroadcastEvidence(context.Background(), nil) - assert.Error(t, err) - } -} - func waitForBlock(ctx context.Context, t *testing.T, c client.Client, height int64) { timer := time.NewTimer(0 * time.Millisecond) defer timer.Stop() diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index 8c4c4f277..38fb4fcf7 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -3,16 +3,20 @@ package client_test import ( "bytes" "context" - "fmt" "log" + "net/http" + "testing" + "time" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctest "github.com/tendermint/tendermint/rpc/test" ) -func ExampleHTTP_simple() { +func TestHTTPSimple(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -29,9 +33,7 @@ func ExampleHTTP_simple() { // Create our RPC client rpcAddr := conf.RPC.ListenAddress c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) - } + require.NoError(t, err) // Create a transaction k := []byte("name") @@ -41,6 +43,7 @@ func ExampleHTTP_simple() { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). 
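// NOTE(review): the comment above recommends BroadcastTxSync for production
// use. A hedged sketch of that pattern (an illustration, not part of this
// diff, using only methods that appear in this changeset):
//
//	res, err := c.BroadcastTxSync(ctx, tx) // returns once CheckTx has passed
//	if err != nil { ... }
//	// then poll c.Tx(ctx, res.Hash, false) until the tx is found, e.g.
//	// inside a require.Eventually loop, to confirm it was committed.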
bres, err := c.BroadcastTxCommit(context.Background(), tx) + require.NoError(t, err) if err != nil { log.Fatal(err) } @@ -50,30 +53,19 @@ func ExampleHTTP_simple() { // Now try to fetch the value for the key qres, err := c.ABCIQuery(context.Background(), "/key", k) - if err != nil { - log.Fatal(err) - } - if qres.Response.IsErr() { - log.Fatal("ABCIQuery failed") - } - if !bytes.Equal(qres.Response.Key, k) { - log.Fatal("returned key does not match queried key") - } - if !bytes.Equal(qres.Response.Value, v) { - log.Fatal("returned value does not match sent value") - } + require.NoError(t, err) + require.False(t, qres.Response.IsErr(), "ABCIQuery failed") + require.True(t, bytes.Equal(qres.Response.Key, k), + "returned key does not match queried key") + require.True(t, bytes.Equal(qres.Response.Value, v), + "returned value does not match sent value [%s]", string(v)) - fmt.Println("Sent tx :", string(tx)) - fmt.Println("Queried for :", string(qres.Response.Key)) - fmt.Println("Got value :", string(qres.Response.Value)) - - // Output: - // Sent tx : name=satoshi - // Queried for : name - // Got value : satoshi + assert.Equal(t, "name=satoshi", string(tx), "sent tx") + assert.Equal(t, "name", string(qres.Response.Key), "queried for") + assert.Equal(t, "satoshi", string(qres.Response.Value), "got value") } -func ExampleHTTP_batching() { +func TestHTTPBatching(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -88,10 +80,8 @@ func ExampleHTTP_batching() { defer func() { _ = closer(ctx) }() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) - if err != nil { - log.Fatal(err) - } + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) + require.NoError(t, err) // Create our two transactions k1 := []byte("firstName") @@ -111,41 +101,51 @@ func ExampleHTTP_batching() { for _, tx := range txs { // Broadcast the transaction and wait for it to commit (rather use // c.BroadcastTxSync though in production). - if _, err := batch.BroadcastTxCommit(context.Background(), tx); err != nil { - log.Fatal(err) - } + _, err := batch.BroadcastTxSync(ctx, tx) + require.NoError(t, err) } // Send the batch of 2 transactions - if _, err := batch.Send(context.Background()); err != nil { - log.Fatal(err) - } + _, err = batch.Send(ctx) + require.NoError(t, err) - // Now let's query for the original results as a batch - keys := [][]byte{k1, k2} - for _, key := range keys { - if _, err := batch.ABCIQuery(context.Background(), "/key", key); err != nil { - log.Fatal(err) - } - } + // wait for the transaction to land, we could poll more for + // the transactions to land definitively. 
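// NOTE(review): require.Eventually, used just below, re-runs the condition
// function once per tick (1s here) until it returns true or the waitFor
// budget (10s here) is exhausted, failing the test on timeout. That is the
// polling the comment above alludes to.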
+ require.Eventually(t, + func() bool { + // Now let's query for the original results as a batch + exists := 0 + for _, key := range [][]byte{k1, k2} { + _, err := batch.ABCIQuery(context.Background(), "/key", key) + if err == nil { + exists++ + + } + } + return exists == 2 + }, + 10*time.Second, + time.Second, + ) // Send the 2 queries and keep the results - results, err := batch.Send(context.Background()) - if err != nil { - log.Fatal(err) - } + results, err := batch.Send(ctx) + require.NoError(t, err) + require.Len(t, results, 2) // Each result in the returned list is the deserialized result of each // respective ABCIQuery response for _, result := range results { - qr, ok := result.(*ctypes.ResultABCIQuery) - if !ok { - log.Fatal("invalid result type from ABCIQuery request") - } - fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) - } + qr, ok := result.(*coretypes.ResultABCIQuery) + require.True(t, ok, "invalid result type from ABCIQuery request") - // Output: - // firstName = satoshi - // lastName = nakamoto + switch string(qr.Response.Key) { + case "firstName": + require.Equal(t, "satoshi", string(qr.Response.Value)) + case "lastName": + require.Equal(t, "nakamoto", string(qr.Response.Value)) + default: + t.Fatalf("encountered unknown key %q", string(qr.Response.Key)) + } + } } diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 49598e814..58e48dbba 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -4,8 +4,10 @@ import ( "context" "errors" "fmt" + "sync" "time" + "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" ) @@ -77,8 +79,91 @@ func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) ( select { case event := <-eventCh: - return event.Data.(types.TMEventData), nil + return event.Data, nil case <-ctx.Done(): return nil, errors.New("timed out waiting for event") } } + +var ( + // ErrClientRunning is returned by Start when the client is already running. + ErrClientRunning = errors.New("client already running") + + // ErrClientNotRunning is returned by Stop when the client is not running. + ErrClientNotRunning = errors.New("client is not running") +) + +// RunState is a helper that a client implementation can embed to implement +// common plumbing for keeping track of run state and logging. +// +// TODO(creachadair): This type is a temporary measure, and will be removed. +// See the discussion on #6971. +type RunState struct { + Logger log.Logger + + mu sync.Mutex + name string + isRunning bool + quit chan struct{} +} + +// NewRunState returns a new unstarted run state tracker with the given logging +// label and log sink. If logger == nil, a no-op logger is provided by default. +func NewRunState(name string, logger log.Logger) *RunState { + if logger == nil { + logger = log.NewNopLogger() + } + return &RunState{ + name: name, + Logger: logger, + } +} + +// Start sets the state to running, or reports an error. +func (r *RunState) Start() error { + r.mu.Lock() + defer r.mu.Unlock() + if r.isRunning { + r.Logger.Error("not starting client, it is already started", "client", r.name) + return ErrClientRunning + } + r.Logger.Info("starting client", "client", r.name) + r.isRunning = true + r.quit = make(chan struct{}) + return nil +} + +// Stop sets the state to not running, or reports an error. 
+func (r *RunState) Stop() error { + r.mu.Lock() + defer r.mu.Unlock() + if !r.isRunning { + r.Logger.Error("not stopping client; it is already stopped", "client", r.name) + return ErrClientNotRunning + } + r.Logger.Info("stopping client", "client", r.name) + r.isRunning = false + close(r.quit) + return nil +} + +// SetLogger updates the log sink. +func (r *RunState) SetLogger(logger log.Logger) { + r.mu.Lock() + defer r.mu.Unlock() + r.Logger = logger +} + +// IsRunning reports whether the state is running. +func (r *RunState) IsRunning() bool { + r.mu.Lock() + defer r.mu.Unlock() + return r.isRunning +} + +// Quit returns a channel that is closed when a call to Stop succeeds. +func (r *RunState) Quit() <-chan struct{} { + r.mu.Lock() + defer r.mu.Unlock() + return r.quit +} diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 3b78dfe5f..60732b991 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" ) func TestWaitForHeight(t *testing.T) { @@ -33,7 +33,7 @@ func TestWaitForHeight(t *testing.T) { // now set current block height to 10 m.Call = mock.Call{ - Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10}}, + Response: &coretypes.ResultStatus{SyncInfo: coretypes.SyncInfo{LatestBlockHeight: 10}}, } // we will not wait for more than 10 blocks @@ -53,7 +53,7 @@ func TestWaitForHeight(t *testing.T) { // we use the callback to update the status height myWaiter := func(delta int64) error { // update the height for the next call - m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}} + m.Call.Response = &coretypes.ResultStatus{SyncInfo: coretypes.SyncInfo{LatestBlockHeight: 15}} return client.DefaultWaitStrategy(delta) } @@ -65,13 +65,13 @@ func TestWaitForHeight(t *testing.T) { pre := r.Calls[3] require.Nil(pre.Error) - prer, ok := pre.Response.(*ctypes.ResultStatus) + prer, ok := pre.Response.(*coretypes.ResultStatus) require.True(ok) assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) post := r.Calls[4] require.Nil(post.Error) - postr, ok := post.Response.(*ctypes.ResultStatus) + postr, ok := post.Response.(*coretypes.ResultStatus) require.True(ok) assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 54c56f99f..5bd7b398a 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -2,13 +2,13 @@ package http import ( "context" + "errors" "net/http" "time" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/log" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" "github.com/tendermint/tendermint/types" ) @@ -121,20 +121,20 @@ func NewWithTimeout(remote string, t time.Duration) (*HTTP, error) { } // NewWithClient allows you to set a custom http client. An error is returned -// on invalid remote. The function panics when client is nil. +// on invalid remote. The function returns an error when client is nil +// or an invalid remote. 
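// NOTE(review): a sketch (an assumption for illustration, not part of this
// diff) of how a client implementation can embed the RunState helper added to
// rpc/client/helpers.go above; wsEvents in ws.go below adopts the same
// plumbing in place of service.BaseService:
//
//	type myClient struct {
//		*rpcclient.RunState // provides Start, Stop, IsRunning, Quit
//	}
//
//	func newMyClient(logger log.Logger) *myClient {
//		return &myClient{RunState: rpcclient.NewRunState("myClient", logger)}
//	}
//
//	func (c *myClient) loop() {
//		// call Start first: the quit channel is created by Start, and a
//		// second Start without an intervening Stop returns ErrClientRunning.
//		for {
//			select {
//			case <-c.Quit(): // closed by a successful Stop
//				return
//			default:
//				// one unit of work
//			}
//		}
//	}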
func NewWithClient(remote string, c *http.Client) (*HTTP, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } return NewWithClientAndWSOptions(remote, c, DefaultWSOptions()) } // NewWithClientAndWSOptions allows you to set a custom http client and -// WebSocket options. An error is returned on invalid remote. The function -// panics when client is nil. +// WebSocket options. An error is returned on invalid remote or nil client. func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*HTTP, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } rpc, err := jsonrpcclient.NewWithHTTPClient(remote, c) if err != nil { @@ -158,11 +158,6 @@ func NewWithClientAndWSOptions(remote string, c *http.Client, wso WSOptions) (*H var _ rpcclient.Client = (*HTTP)(nil) -// SetLogger sets a logger. -func (c *HTTP) SetLogger(l log.Logger) { - c.wsEvents.SetLogger(l) -} - // Remote returns the remote network address in a string form. func (c *HTTP) Remote() string { return c.remote @@ -204,8 +199,8 @@ func (b *BatchHTTP) Count() int { //----------------------------------------------------------------------------- // baseRPCClient -func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error) { - result := new(ctypes.ResultStatus) +func (c *baseRPCClient) Status(ctx context.Context) (*coretypes.ResultStatus, error) { + result := new(coretypes.ResultStatus) _, err := c.caller.Call(ctx, "status", map[string]interface{}{}, result) if err != nil { return nil, err @@ -214,8 +209,8 @@ func (c *baseRPCClient) Status(ctx context.Context) (*ctypes.ResultStatus, error return result, nil } -func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { - result := new(ctypes.ResultABCIInfo) +func (c *baseRPCClient) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { + result := new(coretypes.ResultABCIInfo) _, err := c.caller.Call(ctx, "abci_info", map[string]interface{}{}, result) if err != nil { return nil, err @@ -228,7 +223,7 @@ func (c *baseRPCClient) ABCIQuery( ctx context.Context, path string, data bytes.HexBytes, -) (*ctypes.ResultABCIQuery, error) { +) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } @@ -236,8 +231,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - result := new(ctypes.ResultABCIQuery) + opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { + result := new(coretypes.ResultABCIQuery) _, err := c.caller.Call(ctx, "abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) @@ -251,8 +246,8 @@ func (c *baseRPCClient) ABCIQueryWithOptions( func (c *baseRPCClient) BroadcastTxCommit( ctx context.Context, tx types.Tx, -) (*ctypes.ResultBroadcastTxCommit, error) { - result := new(ctypes.ResultBroadcastTxCommit) +) (*coretypes.ResultBroadcastTxCommit, error) { + result := new(coretypes.ResultBroadcastTxCommit) _, err := c.caller.Call(ctx, "broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) if err != nil { return nil, err @@ -263,14 +258,14 @@ func (c *baseRPCClient) BroadcastTxCommit( func (c *baseRPCClient) BroadcastTxAsync( ctx context.Context, tx types.Tx, -) (*ctypes.ResultBroadcastTx, error) { +) (*coretypes.ResultBroadcastTx, error) { return c.broadcastTX(ctx, 
"broadcast_tx_async", tx) } func (c *baseRPCClient) BroadcastTxSync( ctx context.Context, tx types.Tx, -) (*ctypes.ResultBroadcastTx, error) { +) (*coretypes.ResultBroadcastTx, error) { return c.broadcastTX(ctx, "broadcast_tx_sync", tx) } @@ -278,8 +273,8 @@ func (c *baseRPCClient) broadcastTX( ctx context.Context, route string, tx types.Tx, -) (*ctypes.ResultBroadcastTx, error) { - result := new(ctypes.ResultBroadcastTx) +) (*coretypes.ResultBroadcastTx, error) { + result := new(coretypes.ResultBroadcastTx) _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) if err != nil { return nil, err @@ -290,8 +285,8 @@ func (c *baseRPCClient) broadcastTX( func (c *baseRPCClient) UnconfirmedTxs( ctx context.Context, limit *int, -) (*ctypes.ResultUnconfirmedTxs, error) { - result := new(ctypes.ResultUnconfirmedTxs) +) (*coretypes.ResultUnconfirmedTxs, error) { + result := new(coretypes.ResultUnconfirmedTxs) params := make(map[string]interface{}) if limit != nil { params["limit"] = limit @@ -303,8 +298,8 @@ func (c *baseRPCClient) UnconfirmedTxs( return result, nil } -func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { - result := new(ctypes.ResultUnconfirmedTxs) +func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { + result := new(coretypes.ResultUnconfirmedTxs) _, err := c.caller.Call(ctx, "num_unconfirmed_txs", map[string]interface{}{}, result) if err != nil { return nil, err @@ -312,8 +307,8 @@ func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUn return result, nil } -func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { - result := new(ctypes.ResultCheckTx) +func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { + result := new(coretypes.ResultCheckTx) _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) if err != nil { return nil, err @@ -321,8 +316,16 @@ func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.Resul return result, nil } -func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { - result := new(ctypes.ResultNetInfo) +func (c *baseRPCClient) RemoveTx(ctx context.Context, txKey types.TxKey) error { + _, err := c.caller.Call(ctx, "remove_tx", map[string]interface{}{"tx_key": txKey}, nil) + if err != nil { + return err + } + return nil +} + +func (c *baseRPCClient) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { + result := new(coretypes.ResultNetInfo) _, err := c.caller.Call(ctx, "net_info", map[string]interface{}{}, result) if err != nil { return nil, err @@ -330,8 +333,8 @@ func (c *baseRPCClient) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, err return result, nil } -func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { - result := new(ctypes.ResultDumpConsensusState) +func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { + result := new(coretypes.ResultDumpConsensusState) _, err := c.caller.Call(ctx, "dump_consensus_state", map[string]interface{}{}, result) if err != nil { return nil, err @@ -339,8 +342,8 @@ func (c *baseRPCClient) DumpConsensusState(ctx context.Context) (*ctypes.ResultD return result, nil } -func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { - result := 
new(ctypes.ResultConsensusState) +func (c *baseRPCClient) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { + result := new(coretypes.ResultConsensusState) _, err := c.caller.Call(ctx, "consensus_state", map[string]interface{}{}, result) if err != nil { return nil, err @@ -351,8 +354,8 @@ func (c *baseRPCClient) ConsensusState(ctx context.Context) (*ctypes.ResultConse func (c *baseRPCClient) ConsensusParams( ctx context.Context, height *int64, -) (*ctypes.ResultConsensusParams, error) { - result := new(ctypes.ResultConsensusParams) +) (*coretypes.ResultConsensusParams, error) { + result := new(coretypes.ResultConsensusParams) params := make(map[string]interface{}) if height != nil { params["height"] = height @@ -364,8 +367,8 @@ func (c *baseRPCClient) ConsensusParams( return result, nil } -func (c *baseRPCClient) Health(ctx context.Context) (*ctypes.ResultHealth, error) { - result := new(ctypes.ResultHealth) +func (c *baseRPCClient) Health(ctx context.Context) (*coretypes.ResultHealth, error) { + result := new(coretypes.ResultHealth) _, err := c.caller.Call(ctx, "health", map[string]interface{}{}, result) if err != nil { return nil, err @@ -377,8 +380,8 @@ func (c *baseRPCClient) BlockchainInfo( ctx context.Context, minHeight, maxHeight int64, -) (*ctypes.ResultBlockchainInfo, error) { - result := new(ctypes.ResultBlockchainInfo) +) (*coretypes.ResultBlockchainInfo, error) { + result := new(coretypes.ResultBlockchainInfo) _, err := c.caller.Call(ctx, "blockchain", map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, result) @@ -388,8 +391,8 @@ func (c *baseRPCClient) BlockchainInfo( return result, nil } -func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { - result := new(ctypes.ResultGenesis) +func (c *baseRPCClient) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { + result := new(coretypes.ResultGenesis) _, err := c.caller.Call(ctx, "genesis", map[string]interface{}{}, result) if err != nil { return nil, err @@ -397,8 +400,8 @@ func (c *baseRPCClient) Genesis(ctx context.Context) (*ctypes.ResultGenesis, err return result, nil } -func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { - result := new(ctypes.ResultGenesisChunk) +func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { + result := new(coretypes.ResultGenesisChunk) _, err := c.caller.Call(ctx, "genesis_chunked", map[string]interface{}{"chunk": id}, result) if err != nil { return nil, err @@ -406,8 +409,8 @@ func (c *baseRPCClient) GenesisChunked(ctx context.Context, id uint) (*ctypes.Re return result, nil } -func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { - result := new(ctypes.ResultBlock) +func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { + result := new(coretypes.ResultBlock) params := make(map[string]interface{}) if height != nil { params["height"] = height @@ -419,8 +422,8 @@ func (c *baseRPCClient) Block(ctx context.Context, height *int64) (*ctypes.Resul return result, nil } -func (c *baseRPCClient) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { - result := new(ctypes.ResultBlock) +func (c *baseRPCClient) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { + result := new(coretypes.ResultBlock) params := map[string]interface{}{ "hash": hash, } @@ -434,8 +437,8 
@@ func (c *baseRPCClient) BlockByHash(ctx context.Context, hash []byte) (*ctypes.R func (c *baseRPCClient) BlockResults( ctx context.Context, height *int64, -) (*ctypes.ResultBlockResults, error) { - result := new(ctypes.ResultBlockResults) +) (*coretypes.ResultBlockResults, error) { + result := new(coretypes.ResultBlockResults) params := make(map[string]interface{}) if height != nil { params["height"] = height @@ -447,8 +450,8 @@ func (c *baseRPCClient) BlockResults( return result, nil } -func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { - result := new(ctypes.ResultCommit) +func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { + result := new(coretypes.ResultCommit) params := make(map[string]interface{}) if height != nil { params["height"] = height @@ -460,8 +463,8 @@ func (c *baseRPCClient) Commit(ctx context.Context, height *int64) (*ctypes.Resu return result, nil } -func (c *baseRPCClient) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { - result := new(ctypes.ResultTx) +func (c *baseRPCClient) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { + result := new(coretypes.ResultTx) params := map[string]interface{}{ "hash": hash, "prove": prove, @@ -480,9 +483,9 @@ func (c *baseRPCClient) TxSearch( page, perPage *int, orderBy string, -) (*ctypes.ResultTxSearch, error) { +) (*coretypes.ResultTxSearch, error) { - result := new(ctypes.ResultTxSearch) + result := new(coretypes.ResultTxSearch) params := map[string]interface{}{ "query": query, "prove": prove, @@ -509,9 +512,9 @@ func (c *baseRPCClient) BlockSearch( query string, page, perPage *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) { +) (*coretypes.ResultBlockSearch, error) { - result := new(ctypes.ResultBlockSearch) + result := new(coretypes.ResultBlockSearch) params := map[string]interface{}{ "query": query, "order_by": orderBy, @@ -537,8 +540,8 @@ func (c *baseRPCClient) Validators( height *int64, page, perPage *int, -) (*ctypes.ResultValidators, error) { - result := new(ctypes.ResultValidators) +) (*coretypes.ResultValidators, error) { + result := new(coretypes.ResultValidators) params := make(map[string]interface{}) if page != nil { params["page"] = page @@ -559,8 +562,8 @@ func (c *baseRPCClient) Validators( func (c *baseRPCClient) BroadcastEvidence( ctx context.Context, ev types.Evidence, -) (*ctypes.ResultBroadcastEvidence, error) { - result := new(ctypes.ResultBroadcastEvidence) +) (*coretypes.ResultBroadcastEvidence, error) { + result := new(coretypes.ResultBroadcastEvidence) _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) if err != nil { return nil, err diff --git a/rpc/client/http/ws.go b/rpc/client/http/ws.go index afdaec861..0f908e271 100644 --- a/rpc/client/http/ws.go +++ b/rpc/client/http/ws.go @@ -9,15 +9,12 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmjson "github.com/tendermint/tendermint/libs/json" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/libs/pubsub" rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) -var errNotRunning = errors.New("client is not running. 
Use .Start() method to start") - // WSOptions for the WS part of the HTTP client. type WSOptions struct { Path string // path (e.g. "/ws") @@ -48,7 +45,7 @@ func (wso WSOptions) Validate() error { // wsEvents is a wrapper around WSClient, which implements EventsClient. type wsEvents struct { - service.BaseService + *rpcclient.RunState ws *jsonrpcclient.WSClient mtx tmsync.RWMutex @@ -56,7 +53,7 @@ type wsEvents struct { } type wsSubscription struct { - res chan ctypes.ResultEvent + res chan coretypes.ResultEvent id string query string } @@ -78,7 +75,7 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { w := &wsEvents{ subscriptions: make(map[string]*wsSubscription), } - w.BaseService = *service.NewBaseService(nil, "wsEvents", w) + w.RunState = rpcclient.NewRunState("wsEvents", nil) var err error w.ws, err = jsonrpcclient.NewWSWithOptions(remote, wso.Path, wso.WSOptions) @@ -94,23 +91,20 @@ func newWsEvents(remote string, wso WSOptions) (*wsEvents, error) { return w, nil } -// OnStart implements service.Service by starting WSClient and event loop. -func (w *wsEvents) OnStart() error { +// Start starts the websocket client and the event loop. +func (w *wsEvents) Start() error { if err := w.ws.Start(); err != nil { return err } - go w.eventListener() - return nil } -// OnStop implements service.Service by stopping WSClient. -func (w *wsEvents) OnStop() { - if err := w.ws.Stop(); err != nil { - w.Logger.Error("Can't stop ws client", "err", err) - } -} +// IsRunning reports whether the websocket client is running. +func (w *wsEvents) IsRunning() bool { return w.ws.IsRunning() } + +// Stop shuts down the websocket client. +func (w *wsEvents) Stop() error { return w.ws.Stop() } // Subscribe implements EventsClient by using WSClient to subscribe given // subscriber to query. By default, it returns a channel with cap=1. Error is @@ -125,10 +119,10 @@ func (w *wsEvents) OnStop() { // // It returns an error if wsEvents is not running. func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { if !w.IsRunning() { - return nil, errNotRunning + return nil, rpcclient.ErrClientNotRunning } if err := w.ws.Subscribe(ctx, query); err != nil { @@ -140,7 +134,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, outCap = outCapacity[0] } - outc := make(chan ctypes.ResultEvent, outCap) + outc := make(chan coretypes.ResultEvent, outCap) w.mtx.Lock() defer w.mtx.Unlock() // subscriber param is ignored because Tendermint will override it with @@ -156,7 +150,7 @@ func (w *wsEvents) Subscribe(ctx context.Context, subscriber, query string, // It returns an error if wsEvents is not running. func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { if !w.IsRunning() { - return errNotRunning + return rpcclient.ErrClientNotRunning } if err := w.ws.Unsubscribe(ctx, query); err != nil { @@ -182,7 +176,7 @@ func (w *wsEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // It returns an error if wsEvents is not running. 
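// For orientation, a minimal usage sketch of the reworked lifecycle and
// subscription API. It assumes rpchttp names the
// github.com/tendermint/tendermint/rpc/client/http package and that its
// New(remote string) constructor is unchanged; the address and query below
// are placeholders.
//
//	c, err := rpchttp.New("tcp://127.0.0.1:26657")
//	if err != nil {
//		return err
//	}
//	if err := c.Start(); err != nil { // Start reports errors directly; there is no OnStart hook anymore
//		return err
//	}
//	defer c.Stop() // Stop now returns an error as well
//
//	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
//	defer cancel()
//	// Subscribe returns a channel with cap=1 unless outCapacity is given, and
//	// fails with rpcclient.ErrClientNotRunning if the client was never started.
//	events, err := c.Subscribe(ctx, "example", "tm.event = 'NewBlock'")
//	if err != nil {
//		return err
//	}
//	for ev := range events {
//		fmt.Println("event for query:", ev.Query)
//	}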
func (w *wsEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { if !w.IsRunning() { - return errNotRunning + return rpcclient.ErrClientNotRunning } if err := w.ws.UnsubscribeAll(ctx); err != nil { @@ -219,7 +213,7 @@ func (w *wsEvents) redoSubscriptionsAfter(d time.Duration) { } func isErrAlreadySubscribed(err error) bool { - return strings.Contains(err.Error(), tmpubsub.ErrAlreadySubscribed.Error()) + return strings.Contains(err.Error(), pubsub.ErrAlreadySubscribed.Error()) } func (w *wsEvents) eventListener() { @@ -244,7 +238,7 @@ continue } - result := new(ctypes.ResultEvent) + result := new(coretypes.ResultEvent) err := tmjson.Unmarshal(resp.Result, result) if err != nil { w.Logger.Error("failed to unmarshal response", "err", err) diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 3547b42ae..474eb9937 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -24,17 +24,26 @@ import ( "context" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/service" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) //go:generate ../../scripts/mockery_generate.sh Client -// Client wraps most important rpc calls a client would make if you want to -// listen for events, test if it also implements events.EventSwitch. +// Client describes the interface of Tendermint RPC client implementations. type Client interface { - service.Service + // These methods define the operational structure of the client. + + // Start the client. Start must report an error if the client is already running. + Start() error + + // Stop the client. Stop must report an error if the client is not running. + Stop() error + + // IsRunning reports whether the client is running. + IsRunning() bool + + // These embedded interfaces define the callable methods of the service. ABCIClient EventsClient HistoryClient
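// With service.Service no longer embedded, the lifecycle is part of the
// interface itself, so a helper written once against rpcclient.Client works
// for every implementation. A sketch (the helper name and error handling are
// illustrative; assumes the rpcclient import alias used above and "fmt"):

// withRunningClient starts c if it is not already running, invokes fn, and
// stops c again if this call was the one that started it.
func withRunningClient(c rpcclient.Client, fn func(rpcclient.Client) error) error {
	if !c.IsRunning() {
		if err := c.Start(); err != nil {
			return fmt.Errorf("start client: %w", err)
		}
		defer func() { _ = c.Stop() }()
	}
	return fn(c)
}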
type SignClient interface { - Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) - BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) - BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) - Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) - Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) - Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) + Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) + BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) + BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) + Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) + Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) + Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) // TxSearch defines a method to search for a paginated set of transactions by // DeliverTx event search criteria. @@ -81,7 +90,7 @@ type SignClient interface { prove bool, page, perPage *int, orderBy string, - ) (*ctypes.ResultTxSearch, error) + ) (*coretypes.ResultTxSearch, error) // BlockSearch defines a method to search for a paginated set of blocks by // BeginBlock and EndBlock event search criteria. @@ -90,29 +99,29 @@ type SignClient interface { query string, page, perPage *int, orderBy string, - ) (*ctypes.ResultBlockSearch, error) + ) (*coretypes.ResultBlockSearch, error) } // HistoryClient provides access to data from genesis to now in large chunks. type HistoryClient interface { - Genesis(context.Context) (*ctypes.ResultGenesis, error) - GenesisChunked(context.Context, uint) (*ctypes.ResultGenesisChunk, error) - BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) + Genesis(context.Context) (*coretypes.ResultGenesis, error) + GenesisChunked(context.Context, uint) (*coretypes.ResultGenesisChunk, error) + BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) } // StatusClient provides access to general chain info. type StatusClient interface { - Status(context.Context) (*ctypes.ResultStatus, error) + Status(context.Context) (*coretypes.ResultStatus, error) } // NetworkClient is general info about the network state. May not be needed // usually. type NetworkClient interface { - NetInfo(context.Context) (*ctypes.ResultNetInfo, error) - DumpConsensusState(context.Context) (*ctypes.ResultDumpConsensusState, error) - ConsensusState(context.Context) (*ctypes.ResultConsensusState, error) - ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) - Health(context.Context) (*ctypes.ResultHealth, error) + NetInfo(context.Context) (*coretypes.ResultNetInfo, error) + DumpConsensusState(context.Context) (*coretypes.ResultDumpConsensusState, error) + ConsensusState(context.Context) (*coretypes.ResultConsensusState, error) + ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) + Health(context.Context) (*coretypes.ResultHealth, error) } // EventsClient is reactive, you can subscribe to any message, given the proper @@ -125,7 +134,7 @@ type EventsClient interface { // // ctx cannot be used to unsubscribe. To unsubscribe, use either Unsubscribe // or UnsubscribeAll. 
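// Note the hash arguments in SignClient above: Tx and BlockByHash now take
// bytes.HexBytes instead of []byte. HexBytes is declared as a []byte, so raw
// hashes convert directly; the named type only changes how the value is
// rendered as hex when (un)marshaled. A sketch, with a caller-supplied hex
// string standing in for a real transaction hash:
//
//	raw, err := hex.DecodeString(txHashHex) // txHashHex: placeholder input
//	if err != nil {
//		return err
//	}
//	res, err := c.Tx(ctx, bytes.HexBytes(raw), true) // prove=true also fetches a Merkle inclusion proof
//	if err != nil {
//		return err
//	}
//	fmt.Printf("tx found at height %d, index %d\n", res.Height, res.Index)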
- Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) + Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) //nolint:lll // Unsubscribe unsubscribes given subscriber from query. Unsubscribe(ctx context.Context, subscriber, query string) error // UnsubscribeAll unsubscribes given subscriber from all the queries. @@ -134,15 +143,16 @@ type EventsClient interface { // MempoolClient shows us data about current mempool state. type MempoolClient interface { - UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) - NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error) - CheckTx(context.Context, types.Tx) (*ctypes.ResultCheckTx, error) + UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) + NumUnconfirmedTxs(context.Context) (*coretypes.ResultUnconfirmedTxs, error) + CheckTx(context.Context, types.Tx) (*coretypes.ResultCheckTx, error) + RemoveTx(context.Context, types.TxKey) error } // EvidenceClient is used for submitting an evidence of the malicious // behavior. type EvidenceClient interface { - BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error) + BroadcastEvidence(context.Context, types.Evidence) (*coretypes.ResultBroadcastEvidence, error) } // RemoteClient is a Client, which can also return the remote network address. diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 0663ebf67..21ca6e6f1 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -2,16 +2,17 @@ package local import ( "context" + "errors" "fmt" "time" + rpccore "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/libs/pubsub/query" rpcclient "github.com/tendermint/tendermint/rpc/client" - rpccore "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) @@ -46,15 +47,15 @@ type Local struct { // NodeService describes the portion of the node interface that the // local RPC client constructor needs to build a local client. type NodeService interface { - ConfigureRPC() (*rpccore.Environment, error) + RPCEnvironment() *rpccore.Environment EventBus() *types.EventBus } // New configures a client that calls the Node directly. 
func New(node NodeService) (*Local, error) { - env, err := node.ConfigureRPC() - if err != nil { - return nil, err + env := node.RPCEnvironment() + if env == nil { + return nil, errors.New("rpc is nil") } return &Local{ EventBus: node.EventBus(), @@ -71,15 +72,15 @@ func (c *Local) SetLogger(l log.Logger) { c.Logger = l } -func (c *Local) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c *Local) Status(ctx context.Context) (*coretypes.ResultStatus, error) { return c.env.Status(c.ctx) } -func (c *Local) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c *Local) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { return c.env.ABCIInfo(c.ctx) } -func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (c *Local) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, rpcclient.DefaultABCIQueryOptions) } @@ -87,134 +88,124 @@ func (c *Local) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } -func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(c.ctx, tx) } -func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(c.ctx, tx) } -func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(c.ctx, tx) } -func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { return c.env.UnconfirmedTxs(c.ctx, limit) } -func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirmedTxs, error) { +func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*coretypes.ResultUnconfirmedTxs, error) { return c.env.NumUnconfirmedTxs(c.ctx) } -func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { return c.env.CheckTx(c.ctx, tx) } -func (c *Local) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c *Local) RemoveTx(ctx context.Context, txKey types.TxKey) error { + return c.env.Mempool.RemoveTxByKey(txKey) +} + +func (c *Local) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { return c.env.NetInfo(c.ctx) } -func (c *Local) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c *Local) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { return c.env.DumpConsensusState(c.ctx) } -func (c *Local) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c *Local) ConsensusState(ctx context.Context) 
(*coretypes.ResultConsensusState, error) { return c.env.GetConsensusState(c.ctx) } -func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c *Local) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { return c.env.ConsensusParams(c.ctx, height) } -func (c *Local) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c *Local) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.env.Health(c.ctx) } -func (c *Local) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(c.ctx, seeds) -} - -func (c *Local) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*ctypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(c.ctx, peers, persistent, unconditional, private) -} - -func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c *Local) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll return c.env.BlockchainInfo(c.ctx, minHeight, maxHeight) } -func (c *Local) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c *Local) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { return c.env.Genesis(c.ctx) } -func (c *Local) GenesisChunked(ctx context.Context, id uint) (*ctypes.ResultGenesisChunk, error) { +func (c *Local) GenesisChunked(ctx context.Context, id uint) (*coretypes.ResultGenesisChunk, error) { return c.env.GenesisChunked(c.ctx, id) } -func (c *Local) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c *Local) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { return c.env.Block(c.ctx, height) } -func (c *Local) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (c *Local) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { return c.env.BlockByHash(c.ctx, hash) } -func (c *Local) BlockResults(ctx context.Context, height *int64) (*ctypes.ResultBlockResults, error) { +func (c *Local) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { return c.env.BlockResults(c.ctx, height) } -func (c *Local) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c *Local) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { return c.env.Commit(c.ctx, height) } -func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { +func (c *Local) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll return c.env.Validators(c.ctx, height, page, perPage) } -func (c *Local) Tx(ctx context.Context, hash []byte, prove bool) (*ctypes.ResultTx, error) { +func (c *Local) Tx(ctx context.Context, hash bytes.HexBytes, prove bool) (*coretypes.ResultTx, error) { return c.env.Tx(c.ctx, hash, prove) } func (c *Local) TxSearch( _ context.Context, - query string, + queryString string, prove bool, page, perPage *int, orderBy string, -) (*ctypes.ResultTxSearch, error) { - return c.env.TxSearch(c.ctx, query, prove, page, perPage, orderBy) +) (*coretypes.ResultTxSearch, error) { + return c.env.TxSearch(c.ctx, queryString, prove, page, perPage, orderBy) } func (c *Local) BlockSearch( _ 
context.Context, - query string, + queryString string, page, perPage *int, orderBy string, -) (*ctypes.ResultBlockSearch, error) { - return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy) +) (*coretypes.ResultBlockSearch, error) { + return c.env.BlockSearch(c.ctx, queryString, page, perPage, orderBy) } -func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(c.ctx, ev) } func (c *Local) Subscribe( ctx context.Context, subscriber, - query string, - outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { - q, err := tmquery.New(query) + queryString string, + outCapacity ...int) (out <-chan coretypes.ResultEvent, err error) { + q, err := query.New(queryString) if err != nil { return nil, fmt.Errorf("failed to parse query: %w", err) } @@ -234,7 +225,7 @@ func (c *Local) Subscribe( return nil, fmt.Errorf("failed to subscribe: %w", err) } - outc := make(chan ctypes.ResultEvent, outCap) + outc := make(chan coretypes.ResultEvent, outCap) go c.eventsRoutine(sub, subscriber, q, outc) return outc, nil @@ -243,12 +234,12 @@ func (c *Local) Subscribe( func (c *Local) eventsRoutine( sub types.Subscription, subscriber string, - q tmpubsub.Query, - outc chan<- ctypes.ResultEvent) { + q pubsub.Query, + outc chan<- coretypes.ResultEvent) { for { select { case msg := <-sub.Out(): - result := ctypes.ResultEvent{ + result := coretypes.ResultEvent{ SubscriptionID: msg.SubscriptionID(), Query: q.String(), Data: msg.Data(), @@ -265,7 +256,7 @@ func (c *Local) eventsRoutine( } } case <-sub.Canceled(): - if sub.Err() == tmpubsub.ErrUnsubscribed { + if sub.Err() == pubsub.ErrUnsubscribed { return } @@ -281,7 +272,7 @@ func (c *Local) eventsRoutine( } // Try to resubscribe with exponential backoff. -func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscription { +func (c *Local) resubscribe(subscriber string, q pubsub.Query) types.Subscription { attempts := 0 for { if !c.IsRunning() { @@ -298,17 +289,17 @@ func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscript } } -func (c *Local) Unsubscribe(ctx context.Context, subscriber, query string) error { - args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber} +func (c *Local) Unsubscribe(ctx context.Context, subscriber, queryString string) error { + args := pubsub.UnsubscribeArgs{Subscriber: subscriber} var err error - args.Query, err = tmquery.New(query) + args.Query, err = query.New(queryString) if err != nil { // if this isn't a valid query it might be an ID, so // we'll try that. It'll turn into an error when we // try to unsubscribe. Eventually, perhaps, we'll want // to change the interface to only allow // unsubscription by ID, but that's a larger change. 
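// resubscribe above loops until the event bus accepts the subscription or the
// client stops, backing off exponentially between attempts. The retry shape,
// as a self-contained sketch (the function name and the 10ms base delay are
// illustrative; uses the standard time package):

func retryWithBackoff(isRunning func() bool, try func() error) {
	for attempts := 0; isRunning(); attempts++ {
		if try() == nil {
			return
		}
		// Sleep 10ms, 20ms, 40ms, ... doubling after each failed attempt.
		time.Sleep((10 << uint(attempts)) * time.Millisecond)
	}
}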
- args.ID = query + args.ID = queryString } return c.EventBus.Unsubscribe(ctx, args) } diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go index 4e2c0405c..c2e0dc3cd 100644 --- a/rpc/client/main_test.go +++ b/rpc/client/main_test.go @@ -7,6 +7,7 @@ import ( "os" "testing" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" @@ -30,9 +31,11 @@ func NodeSuite(t *testing.T) (service.Service, *config.Config) { node, closer, err := rpctest.StartTendermint(ctx, conf, app, rpctest.SuppressStdout) require.NoError(t, err) t.Cleanup(func() { - _ = closer(ctx) cancel() - app.Close() + assert.NoError(t, node.Stop()) + assert.NoError(t, closer(ctx)) + assert.NoError(t, app.Close()) + node.Wait() _ = os.RemoveAll(dir) }) return node, conf diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 0737deec0..700b08f5e 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -4,10 +4,10 @@ import ( "context" abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/internal/proxy" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -24,11 +24,11 @@ var ( _ client.ABCIClient = (*ABCIRecorder)(nil) ) -func (a ABCIApp) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { - return &ctypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil +func (a ABCIApp) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { + return &coretypes.ResultABCIInfo{Response: a.App.Info(proxy.RequestInfo)}, nil } -func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (a ABCIApp) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return a.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } @@ -36,21 +36,21 @@ func (a ABCIApp) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { q := a.App.Query(abci.RequestQuery{ Data: data, Path: path, Height: opts.Height, Prove: opts.Prove, }) - return &ctypes.ResultABCIQuery{Response: q}, nil + return &coretypes.ResultABCIQuery{Response: q}, nil } // NOTE: Caller should call a.App.Commit() separately, // this function does not actually wait for a commit. // TODO: Make it wait for a commit and set res.Height appropriately. 
-func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - res := ctypes.ResultBroadcastTxCommit{} +func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { + res := coretypes.ResultBroadcastTxCommit{} res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) if res.CheckTx.IsErr() { return &res, nil @@ -60,13 +60,13 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.Re return &res, nil } -func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... if !c.IsErr() { go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() } - return &ctypes.ResultBroadcastTx{ + return &coretypes.ResultBroadcastTx{ Code: c.Code, Data: c.Data, Log: c.Log, @@ -75,13 +75,13 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.Res }, nil } -func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... if !c.IsErr() { go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() } - return &ctypes.ResultBroadcastTx{ + return &coretypes.ResultBroadcastTx{ Code: c.Code, Data: c.Data, Log: c.Log, @@ -100,15 +100,15 @@ type ABCIMock struct { Broadcast Call } -func (m ABCIMock) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (m ABCIMock) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { res, err := m.Info.GetResponse(nil) if err != nil { return nil, err } - return &ctypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil + return &coretypes.ResultABCIInfo{Response: res.(abci.ResponseInfo)}, nil } -func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (m ABCIMock) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return m.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } @@ -116,37 +116,37 @@ func (m ABCIMock) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) if err != nil { return nil, err } resQuery := res.(abci.ResponseQuery) - return &ctypes.ResultABCIQuery{Response: resQuery}, nil + return &coretypes.ResultABCIQuery{Response: resQuery}, nil } -func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { res, err := m.BroadcastCommit.GetResponse(tx) if err != nil { return nil, err } - return res.(*ctypes.ResultBroadcastTxCommit), nil + return res.(*coretypes.ResultBroadcastTxCommit), nil } -func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx 
types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err } - return res.(*ctypes.ResultBroadcastTx), nil + return res.(*coretypes.ResultBroadcastTx), nil } -func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err } - return res.(*ctypes.ResultBroadcastTx), nil + return res.(*coretypes.ResultBroadcastTx), nil } // ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client) @@ -174,7 +174,7 @@ func (r *ABCIRecorder) addCall(call Call) { r.Calls = append(r.Calls, call) } -func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (r *ABCIRecorder) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { res, err := r.Client.ABCIInfo(ctx) r.addCall(Call{ Name: "abci_info", @@ -188,7 +188,7 @@ func (r *ABCIRecorder) ABCIQuery( ctx context.Context, path string, data bytes.HexBytes, -) (*ctypes.ResultABCIQuery, error) { +) (*coretypes.ResultABCIQuery, error) { return r.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } @@ -196,7 +196,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { res, err := r.Client.ABCIQueryWithOptions(ctx, path, data, opts) r.addCall(Call{ Name: "abci_query", @@ -207,7 +207,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions( return res, err } -func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { res, err := r.Client.BroadcastTxCommit(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_commit", @@ -218,7 +218,7 @@ func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*cty return res, err } -func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := r.Client.BroadcastTxAsync(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_async", @@ -229,7 +229,7 @@ func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctyp return res, err } -func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { res, err := r.Client.BroadcastTxSync(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_sync", diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index d164b275a..25fbbc05d 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" "github.com/tendermint/tendermint/types" ) @@ -36,7 +36,7 @@ func TestABCIMock(t *testing.T) { // Broadcast commit depends on call BroadcastCommit: mock.Call{ 
Args: goodTx, - Response: &ctypes.ResultBroadcastTxCommit{ + Response: &coretypes.ResultBroadcastTxCommit{ CheckTx: abci.ResponseCheckTx{Data: bytes.HexBytes("stand")}, DeliverTx: abci.ResponseDeliverTx{Data: bytes.HexBytes("deliver")}, }, @@ -112,7 +112,7 @@ func TestABCIRecorder(t *testing.T) { assert.Nil(info.Error) assert.Nil(info.Args) require.NotNil(info.Response) - ir, ok := info.Response.(*ctypes.ResultABCIInfo) + ir, ok := info.Response.(*coretypes.ResultABCIInfo) require.True(ok) assert.Equal("data", ir.Response.Data) assert.Equal("v0.9.9", ir.Response.Version) diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 57e96fb09..a1a42e28d 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -18,26 +18,17 @@ import ( "context" "reflect" + "github.com/tendermint/tendermint/internal/rpc/core" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/types" ) // Client wraps arbitrary implementations of the various interfaces. type Client struct { - client.ABCIClient - client.SignClient - client.HistoryClient - client.StatusClient - client.EventsClient - client.EvidenceClient - client.MempoolClient - service.Service - + client.Client env *core.Environment } @@ -84,15 +75,15 @@ func (c Call) GetResponse(args interface{}) (interface{}, error) { return nil, c.Error } -func (c Client) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (c Client) Status(ctx context.Context) (*coretypes.ResultStatus, error) { return c.env.Status(&rpctypes.Context{}) } -func (c Client) ABCIInfo(ctx context.Context) (*ctypes.ResultABCIInfo, error) { +func (c Client) ABCIInfo(ctx context.Context) (*coretypes.ResultABCIInfo, error) { return c.env.ABCIInfo(&rpctypes.Context{}) } -func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { +func (c Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { return c.ABCIQueryWithOptions(ctx, path, data, client.DefaultABCIQueryOptions) } @@ -100,84 +91,70 @@ func (c Client) ABCIQueryWithOptions( ctx context.Context, path string, data bytes.HexBytes, - opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) } -func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*coretypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(&rpctypes.Context{}, tx) } -func 
(c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*coretypes.ResultCheckTx, error) { return c.env.CheckTx(&rpctypes.Context{}, tx) } -func (c Client) NetInfo(ctx context.Context) (*ctypes.ResultNetInfo, error) { +func (c Client) NetInfo(ctx context.Context) (*coretypes.ResultNetInfo, error) { return c.env.NetInfo(&rpctypes.Context{}) } -func (c Client) ConsensusState(ctx context.Context) (*ctypes.ResultConsensusState, error) { +func (c Client) ConsensusState(ctx context.Context) (*coretypes.ResultConsensusState, error) { return c.env.GetConsensusState(&rpctypes.Context{}) } -func (c Client) DumpConsensusState(ctx context.Context) (*ctypes.ResultDumpConsensusState, error) { +func (c Client) DumpConsensusState(ctx context.Context) (*coretypes.ResultDumpConsensusState, error) { return c.env.DumpConsensusState(&rpctypes.Context{}) } -func (c Client) ConsensusParams(ctx context.Context, height *int64) (*ctypes.ResultConsensusParams, error) { +func (c Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { return c.env.ConsensusParams(&rpctypes.Context{}, height) } -func (c Client) Health(ctx context.Context) (*ctypes.ResultHealth, error) { +func (c Client) Health(ctx context.Context) (*coretypes.ResultHealth, error) { return c.env.Health(&rpctypes.Context{}) } -func (c Client) DialSeeds(ctx context.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { - return c.env.UnsafeDialSeeds(&rpctypes.Context{}, seeds) -} - -func (c Client) DialPeers( - ctx context.Context, - peers []string, - persistent, - unconditional, - private bool, -) (*ctypes.ResultDialPeers, error) { - return c.env.UnsafeDialPeers(&rpctypes.Context{}, peers, persistent, unconditional, private) -} - -func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { +func (c Client) BlockchainInfo(ctx context.Context, minHeight, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { //nolint:lll return c.env.BlockchainInfo(&rpctypes.Context{}, minHeight, maxHeight) } -func (c Client) Genesis(ctx context.Context) (*ctypes.ResultGenesis, error) { +func (c Client) Genesis(ctx context.Context) (*coretypes.ResultGenesis, error) { return c.env.Genesis(&rpctypes.Context{}) } -func (c Client) Block(ctx context.Context, height *int64) (*ctypes.ResultBlock, error) { +func (c Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { return c.env.Block(&rpctypes.Context{}, height) } -func (c Client) BlockByHash(ctx context.Context, hash []byte) (*ctypes.ResultBlock, error) { +func (c Client) BlockByHash(ctx context.Context, hash bytes.HexBytes) (*coretypes.ResultBlock, error) { return c.env.BlockByHash(&rpctypes.Context{}, hash) } -func (c Client) Commit(ctx context.Context, height *int64) (*ctypes.ResultCommit, error) { +func (c Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { return c.env.Commit(&rpctypes.Context{}, height) } -func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*ctypes.ResultValidators, error) { +func (c Client) Validators(ctx context.Context, height *int64, page, perPage *int) (*coretypes.ResultValidators, error) { //nolint:lll return c.env.Validators(&rpctypes.Context{}, height, page, perPage) } -func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c 
Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) } diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index 6dd6a8d44..22548e891 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -4,7 +4,7 @@ import ( "context" "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" ) // StatusMock returns the result specified by the Call @@ -17,12 +17,12 @@ var ( _ client.StatusClient = (*StatusRecorder)(nil) ) -func (m *StatusMock) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (m *StatusMock) Status(ctx context.Context) (*coretypes.ResultStatus, error) { res, err := m.GetResponse(nil) if err != nil { return nil, err } - return res.(*ctypes.ResultStatus), nil + return res.(*coretypes.ResultStatus), nil } // StatusRecorder can wrap another type (StatusMock, full client) @@ -43,7 +43,7 @@ func (r *StatusRecorder) addCall(call Call) { r.Calls = append(r.Calls, call) } -func (r *StatusRecorder) Status(ctx context.Context) (*ctypes.ResultStatus, error) { +func (r *StatusRecorder) Status(ctx context.Context) (*coretypes.ResultStatus, error) { res, err := r.Client.Status(ctx) r.addCall(Call{ Name: "status", diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go index 3933c33c9..98655280e 100644 --- a/rpc/client/mock/status_test.go +++ b/rpc/client/mock/status_test.go @@ -10,7 +10,7 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" ) func TestStatus(t *testing.T) { @@ -18,14 +18,21 @@ func TestStatus(t *testing.T) { m := &mock.StatusMock{ Call: mock.Call{ - Response: &ctypes.ResultStatus{ - SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: bytes.HexBytes("block"), - LatestAppHash: bytes.HexBytes("app"), - LatestBlockHeight: 10, - MaxPeerBlockHeight: 20, - TotalSyncedTime: time.Second, - RemainingTime: time.Minute, + Response: &coretypes.ResultStatus{ + SyncInfo: coretypes.SyncInfo{ + LatestBlockHash: bytes.HexBytes("block"), + LatestAppHash: bytes.HexBytes("app"), + LatestBlockHeight: 10, + MaxPeerBlockHeight: 20, + TotalSyncedTime: time.Second, + RemainingTime: time.Minute, + TotalSnapshots: 10, + ChunkProcessAvgTime: time.Duration(10), + SnapshotHeight: 10, + SnapshotChunksCount: 9, + SnapshotChunksTotal: 10, + BackFilledBlocks: 9, + BackFillBlocksTotal: 10, }, }}, } @@ -49,11 +56,19 @@ func TestStatus(t *testing.T) { assert.Nil(rs.Args) assert.Nil(rs.Error) require.NotNil(rs.Response) - st, ok := rs.Response.(*ctypes.ResultStatus) + st, ok := rs.Response.(*coretypes.ResultStatus) require.True(ok) assert.EqualValues("block", st.SyncInfo.LatestBlockHash) assert.EqualValues(10, st.SyncInfo.LatestBlockHeight) assert.EqualValues(20, st.SyncInfo.MaxPeerBlockHeight) assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime) assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime) + + assert.EqualValues(10, st.SyncInfo.TotalSnapshots) + assert.EqualValues(time.Duration(10), st.SyncInfo.ChunkProcessAvgTime) + assert.EqualValues(10, st.SyncInfo.SnapshotHeight) + assert.EqualValues(9, status.SyncInfo.SnapshotChunksCount) + assert.EqualValues(10, status.SyncInfo.SnapshotChunksTotal) + assert.EqualValues(9, status.SyncInfo.BackFilledBlocks) + 
assert.EqualValues(10, status.SyncInfo.BackFillBlocksTotal) } diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go deleted file mode 100644 index ef374b9a8..000000000 --- a/rpc/client/mocks/client.go +++ /dev/null @@ -1,831 +0,0 @@ -// Code generated by mockery. DO NOT EDIT. - -package mocks - -import ( - bytes "github.com/tendermint/tendermint/libs/bytes" - client "github.com/tendermint/tendermint/rpc/client" - - context "context" - - coretypes "github.com/tendermint/tendermint/rpc/core/types" - - log "github.com/tendermint/tendermint/libs/log" - - mock "github.com/stretchr/testify/mock" - - types "github.com/tendermint/tendermint/types" -) - -// Client is an autogenerated mock type for the Client type -type Client struct { - mock.Mock -} - -// ABCIInfo provides a mock function with given fields: _a0 -func (_m *Client) ABCIInfo(_a0 context.Context) (*coretypes.ResultABCIInfo, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultABCIInfo - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultABCIInfo); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultABCIInfo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ABCIQuery provides a mock function with given fields: ctx, path, data -func (_m *Client) ABCIQuery(ctx context.Context, path string, data bytes.HexBytes) (*coretypes.ResultABCIQuery, error) { - ret := _m.Called(ctx, path, data) - - var r0 *coretypes.ResultABCIQuery - if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes) *coretypes.ResultABCIQuery); ok { - r0 = rf(ctx, path, data) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultABCIQuery) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes) error); ok { - r1 = rf(ctx, path, data) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ABCIQueryWithOptions provides a mock function with given fields: ctx, path, data, opts -func (_m *Client) ABCIQueryWithOptions(ctx context.Context, path string, data bytes.HexBytes, opts client.ABCIQueryOptions) (*coretypes.ResultABCIQuery, error) { - ret := _m.Called(ctx, path, data, opts) - - var r0 *coretypes.ResultABCIQuery - if rf, ok := ret.Get(0).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) *coretypes.ResultABCIQuery); ok { - r0 = rf(ctx, path, data, opts) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultABCIQuery) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, bytes.HexBytes, client.ABCIQueryOptions) error); ok { - r1 = rf(ctx, path, data, opts) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Block provides a mock function with given fields: ctx, height -func (_m *Client) Block(ctx context.Context, height *int64) (*coretypes.ResultBlock, error) { - ret := _m.Called(ctx, height) - - var r0 *coretypes.ResultBlock - if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlock); ok { - r0 = rf(ctx, height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlock) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { - r1 = rf(ctx, height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockByHash provides a mock function with given fields: ctx, hash -func (_m *Client) BlockByHash(ctx context.Context, hash []byte) 
(*coretypes.ResultBlock, error) { - ret := _m.Called(ctx, hash) - - var r0 *coretypes.ResultBlock - if rf, ok := ret.Get(0).(func(context.Context, []byte) *coretypes.ResultBlock); ok { - r0 = rf(ctx, hash) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlock) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, []byte) error); ok { - r1 = rf(ctx, hash) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockResults provides a mock function with given fields: ctx, height -func (_m *Client) BlockResults(ctx context.Context, height *int64) (*coretypes.ResultBlockResults, error) { - ret := _m.Called(ctx, height) - - var r0 *coretypes.ResultBlockResults - if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultBlockResults); ok { - r0 = rf(ctx, height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlockResults) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { - r1 = rf(ctx, height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockSearch provides a mock function with given fields: ctx, query, page, perPage, orderBy -func (_m *Client) BlockSearch(ctx context.Context, query string, page *int, perPage *int, orderBy string) (*coretypes.ResultBlockSearch, error) { - ret := _m.Called(ctx, query, page, perPage, orderBy) - - var r0 *coretypes.ResultBlockSearch - if rf, ok := ret.Get(0).(func(context.Context, string, *int, *int, string) *coretypes.ResultBlockSearch); ok { - r0 = rf(ctx, query, page, perPage, orderBy) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlockSearch) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, *int, *int, string) error); ok { - r1 = rf(ctx, query, page, perPage, orderBy) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BlockchainInfo provides a mock function with given fields: ctx, minHeight, maxHeight -func (_m *Client) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight int64) (*coretypes.ResultBlockchainInfo, error) { - ret := _m.Called(ctx, minHeight, maxHeight) - - var r0 *coretypes.ResultBlockchainInfo - if rf, ok := ret.Get(0).(func(context.Context, int64, int64) *coretypes.ResultBlockchainInfo); ok { - r0 = rf(ctx, minHeight, maxHeight) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBlockchainInfo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, int64, int64) error); ok { - r1 = rf(ctx, minHeight, maxHeight) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BroadcastEvidence provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultBroadcastEvidence - if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) *coretypes.ResultBroadcastEvidence); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBroadcastEvidence) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Evidence) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BroadcastTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultBroadcastTx - 
if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BroadcastTxCommit provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultBroadcastTxCommit - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTxCommit); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBroadcastTxCommit) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// BroadcastTxSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultBroadcastTx - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultBroadcastTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// CheckTx provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultCheckTx - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultCheckTx); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultCheckTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Commit provides a mock function with given fields: ctx, height -func (_m *Client) Commit(ctx context.Context, height *int64) (*coretypes.ResultCommit, error) { - ret := _m.Called(ctx, height) - - var r0 *coretypes.ResultCommit - if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultCommit); ok { - r0 = rf(ctx, height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultCommit) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { - r1 = rf(ctx, height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ConsensusParams provides a mock function with given fields: ctx, height -func (_m *Client) ConsensusParams(ctx context.Context, height *int64) (*coretypes.ResultConsensusParams, error) { - ret := _m.Called(ctx, height) - - var r0 *coretypes.ResultConsensusParams - if rf, ok := ret.Get(0).(func(context.Context, *int64) *coretypes.ResultConsensusParams); ok { - r0 = rf(ctx, height) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultConsensusParams) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64) error); ok { - r1 = rf(ctx, height) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// ConsensusState provides a mock function with given 
fields: _a0 -func (_m *Client) ConsensusState(_a0 context.Context) (*coretypes.ResultConsensusState, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultConsensusState - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultConsensusState); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultConsensusState) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// DumpConsensusState provides a mock function with given fields: _a0 -func (_m *Client) DumpConsensusState(_a0 context.Context) (*coretypes.ResultDumpConsensusState, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultDumpConsensusState - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultDumpConsensusState); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultDumpConsensusState) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Genesis provides a mock function with given fields: _a0 -func (_m *Client) Genesis(_a0 context.Context) (*coretypes.ResultGenesis, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultGenesis - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultGenesis); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultGenesis) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// GenesisChunked provides a mock function with given fields: _a0, _a1 -func (_m *Client) GenesisChunked(_a0 context.Context, _a1 uint) (*coretypes.ResultGenesisChunk, error) { - ret := _m.Called(_a0, _a1) - - var r0 *coretypes.ResultGenesisChunk - if rf, ok := ret.Get(0).(func(context.Context, uint) *coretypes.ResultGenesisChunk); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultGenesisChunk) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, uint) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Health provides a mock function with given fields: _a0 -func (_m *Client) Health(_a0 context.Context) (*coretypes.ResultHealth, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultHealth - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultHealth); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultHealth) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IsRunning provides a mock function with given fields: -func (_m *Client) IsRunning() bool { - ret := _m.Called() - - var r0 bool - if rf, ok := ret.Get(0).(func() bool); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(bool) - } - - return r0 -} - -// NetInfo provides a mock function with given fields: _a0 -func (_m *Client) NetInfo(_a0 context.Context) (*coretypes.ResultNetInfo, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultNetInfo - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultNetInfo); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultNetInfo) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = 
rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// NumUnconfirmedTxs provides a mock function with given fields: _a0 -func (_m *Client) NumUnconfirmedTxs(_a0 context.Context) (*coretypes.ResultUnconfirmedTxs, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultUnconfirmedTxs - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultUnconfirmedTxs); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// OnReset provides a mock function with given fields: -func (_m *Client) OnReset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStart provides a mock function with given fields: -func (_m *Client) OnStart() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// OnStop provides a mock function with given fields: -func (_m *Client) OnStop() { - _m.Called() -} - -// Quit provides a mock function with given fields: -func (_m *Client) Quit() <-chan struct{} { - ret := _m.Called() - - var r0 <-chan struct{} - if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { - r0 = rf() - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan struct{}) - } - } - - return r0 -} - -// Reset provides a mock function with given fields: -func (_m *Client) Reset() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SetLogger provides a mock function with given fields: _a0 -func (_m *Client) SetLogger(_a0 log.Logger) { - _m.Called(_a0) -} - -// Start provides a mock function with given fields: -func (_m *Client) Start() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Status provides a mock function with given fields: _a0 -func (_m *Client) Status(_a0 context.Context) (*coretypes.ResultStatus, error) { - ret := _m.Called(_a0) - - var r0 *coretypes.ResultStatus - if rf, ok := ret.Get(0).(func(context.Context) *coretypes.ResultStatus); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultStatus) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Stop provides a mock function with given fields: -func (_m *Client) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// String provides a mock function with given fields: -func (_m *Client) String() string { - ret := _m.Called() - - var r0 string - if rf, ok := ret.Get(0).(func() string); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(string) - } - - return r0 -} - -// Subscribe provides a mock function with given fields: ctx, subscriber, query, outCapacity -func (_m *Client) Subscribe(ctx context.Context, subscriber string, query string, outCapacity ...int) (<-chan coretypes.ResultEvent, error) { - _va := make([]interface{}, len(outCapacity)) - for _i := range outCapacity { - _va[_i] = outCapacity[_i] - } - var _ca 
[]interface{} - _ca = append(_ca, ctx, subscriber, query) - _ca = append(_ca, _va...) - ret := _m.Called(_ca...) - - var r0 <-chan coretypes.ResultEvent - if rf, ok := ret.Get(0).(func(context.Context, string, string, ...int) <-chan coretypes.ResultEvent); ok { - r0 = rf(ctx, subscriber, query, outCapacity...) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(<-chan coretypes.ResultEvent) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, string, ...int) error); ok { - r1 = rf(ctx, subscriber, query, outCapacity...) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Tx provides a mock function with given fields: ctx, hash, prove -func (_m *Client) Tx(ctx context.Context, hash []byte, prove bool) (*coretypes.ResultTx, error) { - ret := _m.Called(ctx, hash, prove) - - var r0 *coretypes.ResultTx - if rf, ok := ret.Get(0).(func(context.Context, []byte, bool) *coretypes.ResultTx); ok { - r0 = rf(ctx, hash, prove) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultTx) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, []byte, bool) error); ok { - r1 = rf(ctx, hash, prove) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// TxSearch provides a mock function with given fields: ctx, query, prove, page, perPage, orderBy -func (_m *Client) TxSearch(ctx context.Context, query string, prove bool, page *int, perPage *int, orderBy string) (*coretypes.ResultTxSearch, error) { - ret := _m.Called(ctx, query, prove, page, perPage, orderBy) - - var r0 *coretypes.ResultTxSearch - if rf, ok := ret.Get(0).(func(context.Context, string, bool, *int, *int, string) *coretypes.ResultTxSearch); ok { - r0 = rf(ctx, query, prove, page, perPage, orderBy) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultTxSearch) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, string, bool, *int, *int, string) error); ok { - r1 = rf(ctx, query, prove, page, perPage, orderBy) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// UnconfirmedTxs provides a mock function with given fields: ctx, limit -func (_m *Client) UnconfirmedTxs(ctx context.Context, limit *int) (*coretypes.ResultUnconfirmedTxs, error) { - ret := _m.Called(ctx, limit) - - var r0 *coretypes.ResultUnconfirmedTxs - if rf, ok := ret.Get(0).(func(context.Context, *int) *coretypes.ResultUnconfirmedTxs); ok { - r0 = rf(ctx, limit) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultUnconfirmedTxs) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int) error); ok { - r1 = rf(ctx, limit) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Unsubscribe provides a mock function with given fields: ctx, subscriber, query -func (_m *Client) Unsubscribe(ctx context.Context, subscriber string, query string) error { - ret := _m.Called(ctx, subscriber, query) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string, string) error); ok { - r0 = rf(ctx, subscriber, query) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// UnsubscribeAll provides a mock function with given fields: ctx, subscriber -func (_m *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { - ret := _m.Called(ctx, subscriber) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(ctx, subscriber) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Validators provides a mock function with given fields: ctx, height, page, perPage 
-func (_m *Client) Validators(ctx context.Context, height *int64, page *int, perPage *int) (*coretypes.ResultValidators, error) { - ret := _m.Called(ctx, height, page, perPage) - - var r0 *coretypes.ResultValidators - if rf, ok := ret.Get(0).(func(context.Context, *int64, *int, *int) *coretypes.ResultValidators); ok { - r0 = rf(ctx, height, page, perPage) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*coretypes.ResultValidators) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *int64, *int, *int) error); ok { - r1 = rf(ctx, height, page, perPage) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Wait provides a mock function with given fields: -func (_m *Client) Wait() { - _m.Called() -} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index f8962fb35..38766e047 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -1,6 +1,7 @@ package client_test import ( + "bytes" "context" "encoding/base64" "fmt" @@ -16,15 +17,18 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" - mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/internal/mempool" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" - ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/rpc/coretypes" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" "github.com/tendermint/tendermint/types" ) @@ -33,10 +37,16 @@ func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP { t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.New(rpcAddr) + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) c.SetLogger(log.TestingLogger()) + t.Cleanup(func() { + if c.IsRunning() { + require.NoError(t, c.Stop()) + } + }) + return c } @@ -44,10 +54,18 @@ func getHTTPClientWithTimeout(t *testing.T, conf *config.Config, timeout time.Du t.Helper() rpcAddr := conf.RPC.ListenAddress - c, err := rpchttp.NewWithTimeout(rpcAddr, timeout) + + http.DefaultClient.Timeout = timeout + c, err := rpchttp.NewWithClient(rpcAddr, http.DefaultClient) require.NoError(t, err) c.SetLogger(log.TestingLogger()) + t.Cleanup(func() { + http.DefaultClient.Timeout = 0 + if c.IsRunning() { + require.NoError(t, c.Stop()) + } + }) return c } @@ -63,707 +81,758 @@ func GetClients(t *testing.T, ns service.Service, conf *config.Config) []client. 
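Everything removed in the mocks file above is mockery-generated boilerplate: each method records the call with _m.Called and then returns either the output of a caller-supplied function or the canned values registered on the mock. For readers unfamiliar with the pattern, a minimal sketch of stubbing one of these generated mocks in a test follows. The Status signature and the coretypes import are taken from the diff itself; the mocks import path and the testify/mock calls are assumptions based on the standard mockery layout.

    package client_test

    import (
        "context"
        "testing"

        "github.com/stretchr/testify/mock"
        "github.com/stretchr/testify/require"

        "github.com/tendermint/tendermint/rpc/client/mocks" // assumed location of the generated mock
        "github.com/tendermint/tendermint/rpc/coretypes"
    )

    func TestStubbedStatus(t *testing.T) {
        c := new(mocks.Client)
        // The generated Status method finds these canned values via
        // ret.Get(0) and ret.Error(1), exactly as in the code above.
        c.On("Status", mock.Anything).Return(&coretypes.ResultStatus{}, nil)

        res, err := c.Status(context.Background())
        require.NoError(t, err)
        require.NotNil(t, res)
        c.AssertExpectations(t)
    }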
require.NoError(t, err) return []client.Client{ - getHTTPClient(t, conf), ncl, + getHTTPClient(t, conf), } } -func TestNilCustomHTTPClient(t *testing.T) { - require.Panics(t, func() { - _, _ = rpchttp.NewWithClient("http://example.com", nil) - }) - require.Panics(t, func() { - _, _ = rpcclient.NewWithHTTPClient("http://example.com", nil) - }) -} - -func TestParseInvalidAddress(t *testing.T) { - _, conf := NodeSuite(t) - // should remove trailing / - invalidRemote := conf.RPC.ListenAddress + "/" - _, err := rpchttp.New(invalidRemote) - require.NoError(t, err) -} - -func TestCustomHTTPClient(t *testing.T) { - _, conf := NodeSuite(t) - remote := conf.RPC.ListenAddress - c, err := rpchttp.NewWithClient(remote, http.DefaultClient) - require.Nil(t, err) - status, err := c.Status(context.Background()) - require.NoError(t, err) - require.NotNil(t, status) -} - -func TestCorsEnabled(t *testing.T) { - _, conf := NodeSuite(t) - origin := conf.RPC.CORSAllowedOrigins[0] - remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") - - req, err := http.NewRequest("GET", remote, nil) - require.Nil(t, err, "%+v", err) - req.Header.Set("Origin", origin) - c := &http.Client{} - resp, err := c.Do(req) - require.Nil(t, err, "%+v", err) - defer resp.Body.Close() - - assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) -} - -// Make sure status is correct (we connect properly) -func TestStatus(t *testing.T) { +func TestClientOperations(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - moniker := conf.Moniker + _, conf := NodeSuite(t) + + t.Run("NilCustomHTTPClient", func(t *testing.T) { + _, err := rpchttp.NewWithClient("http://example.com", nil) + require.Error(t, err) + + _, err = rpcclient.NewWithHTTPClient("http://example.com", nil) + require.Error(t, err) + }) + t.Run("ParseInvalidAddress", func(t *testing.T) { + // should remove trailing / + invalidRemote := conf.RPC.ListenAddress + "/" + _, err := rpchttp.New(invalidRemote) + require.NoError(t, err) + }) + t.Run("CustomHTTPClient", func(t *testing.T) { + remote := conf.RPC.ListenAddress + c, err := rpchttp.NewWithClient(remote, http.DefaultClient) + require.Nil(t, err) status, err := c.Status(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.Equal(t, moniker, status.NodeInfo.Moniker) - } + require.NoError(t, err) + require.NotNil(t, status) + }) + t.Run("CorsEnabled", func(t *testing.T) { + origin := conf.RPC.CORSAllowedOrigins[0] + remote := strings.ReplaceAll(conf.RPC.ListenAddress, "tcp", "http") + + req, err := http.NewRequestWithContext(ctx, "GET", remote, nil) + require.Nil(t, err, "%+v", err) + req.Header.Set("Origin", origin) + resp, err := http.DefaultClient.Do(req) + require.Nil(t, err, "%+v", err) + defer resp.Body.Close() + + assert.Equal(t, resp.Header.Get("Access-Control-Allow-Origin"), origin) + }) + t.Run("Batching", func(t *testing.T) { + t.Run("JSONRPCCalls", func(t *testing.T) { + c := getHTTPClient(t, conf) + testBatchedJSONRPCCalls(ctx, t, c) + }) + t.Run("JSONRPCCallsCancellation", func(t *testing.T) { + _, _, tx1 := MakeTxKV() + _, _, tx2 := MakeTxKV() + + c := getHTTPClient(t, conf) + batch := c.NewBatch() + _, err := batch.BroadcastTxCommit(ctx, tx1) + require.NoError(t, err) + _, err = batch.BroadcastTxCommit(ctx, tx2) + require.NoError(t, err) + // we should have 2 requests waiting + require.Equal(t, 2, batch.Count()) + // we want to make sure we cleared 2 pending requests + 
require.Equal(t, 2, batch.Clear()) + // now there should be no batched requests + require.Equal(t, 0, batch.Count()) + }) + t.Run("SendingEmptyRequest", func(t *testing.T) { + c := getHTTPClient(t, conf) + batch := c.NewBatch() + _, err := batch.Send(ctx) + require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") + }) + t.Run("ClearingEmptyRequest", func(t *testing.T) { + c := getHTTPClient(t, conf) + batch := c.NewBatch() + require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") + }) + t.Run("ConcurrentJSONRPC", func(t *testing.T) { + var wg sync.WaitGroup + c := getHTTPClient(t, conf) + for i := 0; i < 50; i++ { + wg.Add(1) + go func() { + defer wg.Done() + testBatchedJSONRPCCalls(ctx, t, c) + }() + } + wg.Wait() + }) + }) + t.Run("HTTPReturnsErrorIfClientIsNotRunning", func(t *testing.T) { + c := getHTTPClientWithTimeout(t, conf, 100*time.Millisecond) + + // on Subscribe + _, err := c.Subscribe(ctx, "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeaderValue).String()) + assert.Error(t, err) + + // on Unsubscribe + err = c.Unsubscribe(ctx, "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeaderValue).String()) + assert.Error(t, err) + + // on UnsubscribeAll + err = c.UnsubscribeAll(ctx, "TestHeaderEvents") + assert.Error(t, err) + }) } // Make sure info is correct (we connect properly) -func TestInfo(t *testing.T) { +func TestClientMethodCalls(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() n, conf := NodeSuite(t) + // for broadcast tx tests + pool := getMempool(t, n) + + // for evidence tests + pv, err := privval.LoadOrGenFilePV(conf.PrivValidator.KeyFile(), conf.PrivValidator.StateFile()) + require.NoError(t, err) + for i, c := range GetClients(t, n, conf) { - // status, err := c.Status() - // require.Nil(t, err, "%+v", err) - info, err := c.ABCIInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // TODO: this is not correct - fix merkleeyes! 
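The Batching subtests above cover the whole lifecycle of an rpchttp batch. Condensed into one place, and using only methods that appear in this file (NewBatch, BroadcastTxCommit, Count, Clear, Send), the flow looks roughly like the sketch below; the helper name is hypothetical and the surrounding imports are assumed to match this test file.

    func demoBatch(ctx context.Context, c *rpchttp.HTTP, tx1, tx2 types.Tx) error {
        batch := c.NewBatch()
        // Queue two calls; nothing goes over the wire yet.
        if _, err := batch.BroadcastTxCommit(ctx, tx1); err != nil {
            return err
        }
        if _, err := batch.BroadcastTxCommit(ctx, tx2); err != nil {
            return err
        }
        if n := batch.Count(); n != 2 {
            return fmt.Errorf("expected 2 queued requests, got %d", n)
        }
        // Send issues every queued request in a single round trip and
        // returns the results in order; the batch is empty afterwards.
        results, err := batch.Send(ctx)
        if err != nil {
            return err
        }
        for _, r := range results {
            if bres, ok := r.(*coretypes.ResultBroadcastTxCommit); ok && !bres.CheckTx.IsOK() {
                return fmt.Errorf("tx rejected: %v", bres.CheckTx)
            }
        }
        return nil
    }

Clear is the escape hatch: it drops any queued requests without sending them, which is what the cancellation subtest verifies.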
- // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) - assert.True(t, strings.Contains(info.Response.Data, "size")) + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + t.Run("Status", func(t *testing.T) { + status, err := c.Status(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.Equal(t, conf.Moniker, status.NodeInfo.Moniker) + }) + t.Run("Info", func(t *testing.T) { + info, err := c.ABCIInfo(ctx) + require.NoError(t, err) + + status, err := c.Status(ctx) + require.NoError(t, err) + + assert.GreaterOrEqual(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) + assert.True(t, strings.Contains(info.Response.Data, "size")) + }) + t.Run("NetInfo", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + netinfo, err := nc.NetInfo(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, netinfo.Listening) + assert.Equal(t, 0, len(netinfo.Peers)) + }) + t.Run("DumpConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.DumpConsensusState(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + assert.Empty(t, cons.Peers) + }) + t.Run("ConsensusState", func(t *testing.T) { + // FIXME: fix server so it doesn't panic on invalid input + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + cons, err := nc.ConsensusState(ctx) + require.Nil(t, err, "%d: %+v", i, err) + assert.NotEmpty(t, cons.RoundState) + }) + t.Run("Health", func(t *testing.T) { + nc, ok := c.(client.NetworkClient) + require.True(t, ok, "%d", i) + _, err := nc.Health(ctx) + require.Nil(t, err, "%d: %+v", i, err) + }) + t.Run("GenesisAndValidators", func(t *testing.T) { + // make sure this is the right genesis file + gen, err := c.Genesis(ctx) + require.Nil(t, err, "%d: %+v", i, err) + // get the genesis validator + require.Equal(t, 1, len(gen.Genesis.Validators)) + gval := gen.Genesis.Validators[0] + + // get the current validators + h := int64(1) + vals, err := c.Validators(ctx, &h, nil, nil) + require.Nil(t, err, "%d: %+v", i, err) + require.Equal(t, 1, len(vals.Validators)) + require.Equal(t, 1, vals.Count) + require.Equal(t, 1, vals.Total) + val := vals.Validators[0] + + // make sure the current set is also the genesis set + assert.Equal(t, gval.Power, val.VotingPower) + assert.Equal(t, gval.PubKey, val.PubKey) + }) + t.Run("GenesisChunked", func(t *testing.T) { + first, err := c.GenesisChunked(ctx, 0) + require.NoError(t, err) + + decoded := make([]string, 0, first.TotalChunks) + for i := 0; i < first.TotalChunks; i++ { + chunk, err := c.GenesisChunked(ctx, uint(i)) + require.NoError(t, err) + data, err := base64.StdEncoding.DecodeString(chunk.Data) + require.NoError(t, err) + decoded = append(decoded, string(data)) + + } + doc := []byte(strings.Join(decoded, "")) + + var out types.GenesisDoc + require.NoError(t, tmjson.Unmarshal(doc, &out), + "first: %+v, doc: %s", first, string(doc)) + }) + t.Run("ABCIQuery", func(t *testing.T) { + // write something + k, v, tx := MakeTxKV() + status, err := c.Status(ctx) + require.NoError(t, err) + _, err = c.BroadcastTxSync(ctx, tx) + require.NoError(t, err, "%d: %+v", i, err) + apph := status.SyncInfo.LatestBlockHeight + 2 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + res, err := c.ABCIQuery(ctx, "/key", k) + qres := 
res.Response + if assert.NoError(t, err) && assert.True(t, qres.IsOK()) { + assert.EqualValues(t, v, qres.Value) + } + }) + t.Run("AppCalls", func(t *testing.T) { + // get an offset of height to avoid racing and guessing + s, err := c.Status(ctx) + require.NoError(t, err) + // sh is start height or status height + sh := s.SyncInfo.LatestBlockHeight + + // look for the future + h := sh + 20 + _, err = c.Block(ctx, &h) + require.Error(t, err) // no block yet + + // write something + k, v, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.NoError(t, err) + require.True(t, bres.DeliverTx.IsOK()) + txh := bres.Height + apph := txh + 1 // this is where the tx will be applied to the state + + // wait before querying + err = client.WaitForHeight(c, apph, nil) + require.NoError(t, err) + + _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) + require.NoError(t, err) + qres := _qres.Response + if assert.True(t, qres.IsOK()) { + assert.Equal(t, k, qres.Key) + assert.EqualValues(t, v, qres.Value) + } + + // make sure we can lookup the tx with proof + ptx, err := c.Tx(ctx, bres.Hash, true) + require.NoError(t, err) + assert.EqualValues(t, txh, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + + // and we can even check the block is added + block, err := c.Block(ctx, &apph) + require.NoError(t, err) + appHash := block.Block.Header.AppHash + assert.True(t, len(appHash) > 0) + assert.EqualValues(t, apph, block.Block.Header.Height) + + blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) + require.NoError(t, err) + require.Equal(t, block, blockByHash) + + // now check the results + blockResults, err := c.BlockResults(ctx, &txh) + require.NoError(t, err, "%d: %+v", i, err) + assert.Equal(t, txh, blockResults.Height) + if assert.Equal(t, 1, len(blockResults.TxsResults)) { + // check success code + assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) + } + + // check blockchain info, now that we know there is info + info, err := c.BlockchainInfo(ctx, apph, apph) + require.NoError(t, err) + assert.True(t, info.LastHeight >= apph) + if assert.Equal(t, 1, len(info.BlockMetas)) { + lastMeta := info.BlockMetas[0] + assert.EqualValues(t, apph, lastMeta.Header.Height) + blockData := block.Block + assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) + assert.Equal(t, block.BlockID, lastMeta.BlockID) + } + + // and get the corresponding commit with the same apphash + commit, err := c.Commit(ctx, &apph) + require.NoError(t, err) + cappHash := commit.Header.AppHash + assert.Equal(t, appHash, cappHash) + assert.NotNil(t, commit.Commit) + + // compare the commits (note Commit(2) has commit from Block(3)) + h = apph - 1 + commit2, err := c.Commit(ctx, &h) + require.NoError(t, err) + assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) + + // and we got a proof that works! 
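Several subtests above repeat the same write-then-query round trip against the kvstore app. Stripped of the assertions, it reduces to the sketch below; MakeTxKV is the helper used throughout this file, and the height offset mirrors the comments above about when a tx is actually applied to state.

    func writeThenQuery(ctx context.Context, c client.Client) error {
        k, v, tx := MakeTxKV()
        bres, err := c.BroadcastTxCommit(ctx, tx)
        if err != nil {
            return err
        }
        // The tx lands in state one height after the block that committed
        // it, so wait for that height before querying.
        if err := client.WaitForHeight(c, bres.Height+1, nil); err != nil {
            return err
        }
        res, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false})
        if err != nil {
            return err
        }
        if !bytes.Equal(res.Response.Value, v) {
            return fmt.Errorf("got %q, want %q", res.Response.Value, v)
        }
        return nil
    }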
+ _pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true}) + require.NoError(t, err) + pres := _pres.Response + assert.True(t, pres.IsOK()) + + // XXX Test proof + }) + t.Run("BlockchainInfo", func(t *testing.T) { + err := client.WaitForHeight(c, 10, nil) + require.NoError(t, err) + + res, err := c.BlockchainInfo(ctx, 0, 0) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) > 0) + + res, err = c.BlockchainInfo(ctx, 1, 1) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) == 1) + + res, err = c.BlockchainInfo(ctx, 1, 10000) + require.Nil(t, err, "%d: %+v", i, err) + assert.True(t, res.LastHeight > 0) + assert.True(t, len(res.BlockMetas) < 100) + for _, m := range res.BlockMetas { + assert.NotNil(t, m) + } + + res, err = c.BlockchainInfo(ctx, 10000, 1) + require.NotNil(t, err) + assert.Nil(t, res) + assert.Contains(t, err.Error(), "can't be greater than max") + }) + t.Run("BroadcastTxCommit", func(t *testing.T) { + _, _, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(ctx, tx) + require.Nil(t, err, "%d: %+v", i, err) + require.True(t, bres.CheckTx.IsOK()) + require.True(t, bres.DeliverTx.IsOK()) + + require.Equal(t, 0, pool.Size()) + }) + t.Run("BroadcastTxSync", func(t *testing.T) { + _, _, tx := MakeTxKV() + initMempoolSize := pool.Size() + bres, err := c.BroadcastTxSync(ctx, tx) + require.Nil(t, err, "%d: %+v", i, err) + require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME + + require.Equal(t, initMempoolSize+1, pool.Size()) + + txs := pool.ReapMaxTxs(len(tx)) + require.EqualValues(t, tx, txs[0]) + pool.Flush() + }) + t.Run("CheckTx", func(t *testing.T) { + _, _, tx := MakeTxKV() + + res, err := c.CheckTx(ctx, tx) + require.NoError(t, err) + assert.Equal(t, abci.CodeTypeOK, res.Code) + + assert.Equal(t, 0, pool.Size(), "mempool must be empty") + }) + t.Run("Events", func(t *testing.T) { + // start the client for this test if it isn't already running + if !c.IsRunning() { + // it isn't running, so start it now and stop it when the test ends. + err := c.Start() + require.Nil(t, err) + t.Cleanup(func() { + if err := c.Stop(); err != nil { + t.Error(err) + } + }) + } + + t.Run("Header", func(t *testing.T) { + evt, err := client.WaitForOneEvent(c, types.EventNewBlockHeaderValue, waitForEventTimeout) + require.Nil(t, err, "%d: %+v", i, err) + _, ok := evt.(types.EventDataNewBlockHeader) + require.True(t, ok, "%d: %#v", i, evt) + // TODO: more checks... 
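Taken together, the CheckTx and BroadcastTxSync subtests above pin down a distinction that is easy to miss: CheckTx only validates a tx against the application and leaves the mempool empty, while BroadcastTxSync runs the same validation and, on success, leaves the tx queued. A sketch of the contrast, reusing the pool handle this file obtains via getMempool; the function name is hypothetical.

    func checkVersusBroadcast(ctx context.Context, c client.Client, pool mempool.Mempool) error {
        _, _, tx := MakeTxKV()

        // Validation only: the mempool is untouched afterwards.
        res, err := c.CheckTx(ctx, tx)
        if err != nil {
            return err
        }
        if res.Code != abci.CodeTypeOK {
            return fmt.Errorf("tx failed CheckTx with code %d", res.Code)
        }

        // Validation plus insertion: the tx stays queued for the next block.
        before := pool.Size()
        if _, err := c.BroadcastTxSync(ctx, tx); err != nil {
            return err
        }
        if pool.Size() != before+1 {
            return fmt.Errorf("expected the tx to be queued in the mempool")
        }
        pool.Flush()
        return nil
    }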
+ }) + t.Run("Block", func(t *testing.T) { + const subscriber = "TestBlockEvents" + + eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(types.EventNewBlockValue).String()) + require.NoError(t, err) + t.Cleanup(func() { + if err := c.UnsubscribeAll(ctx, subscriber); err != nil { + t.Error(err) + } + }) + + var firstBlockHeight int64 + for i := int64(0); i < 3; i++ { + event := <-eventCh + blockEvent, ok := event.Data.(types.EventDataNewBlock) + require.True(t, ok) + + block := blockEvent.Block + + if firstBlockHeight == 0 { + firstBlockHeight = block.Header.Height + } + + require.Equal(t, firstBlockHeight+i, block.Header.Height) + } + }) + t.Run("BroadcastTxAsync", func(t *testing.T) { + testTxEventsSent(ctx, t, "async", c) + }) + t.Run("BroadcastTxSync", func(t *testing.T) { + testTxEventsSent(ctx, t, "sync", c) + }) + }) + t.Run("Evidence", func(t *testing.T) { + t.Run("BroadcastDuplicateVote", func(t *testing.T) { + chainID := conf.ChainID() + + correct, fakes := makeEvidences(t, pv, chainID) + + // make sure that the node has produced enough blocks + waitForBlock(ctx, t, c, 2) + + result, err := c.BroadcastEvidence(ctx, correct) + require.NoError(t, err, "BroadcastEvidence(%s) failed", correct) + assert.Equal(t, correct.Hash(), result.Hash, "expected result hash to match evidence hash") + + status, err := c.Status(ctx) + require.NoError(t, err) + err = client.WaitForHeight(c, status.SyncInfo.LatestBlockHeight+2, nil) + require.NoError(t, err) + + ed25519pub := pv.Key.PubKey.(ed25519.PubKey) + rawpub := ed25519pub.Bytes() + result2, err := c.ABCIQuery(ctx, "/val", rawpub) + require.NoError(t, err) + qres := result2.Response + require.True(t, qres.IsOK()) + + var v abci.ValidatorUpdate + err = abci.ReadMessage(bytes.NewReader(qres.Value), &v) + require.NoError(t, err, "Error reading query result, value %v", qres.Value) + + pk, err := encoding.PubKeyFromProto(v.PubKey) + require.NoError(t, err) + + require.EqualValues(t, rawpub, pk, "Stored PubKey not equal to expected, value %v", string(qres.Value)) + require.Equal(t, int64(9), v.Power, "Stored Power not equal to expected, value %v", string(qres.Value)) + + for _, fake := range fakes { + _, err := c.BroadcastEvidence(ctx, fake) + require.Error(t, err, "BroadcastEvidence(%s) succeeded, but the evidence was fake", fake) + } + + }) + t.Run("BroadcastEmpty", func(t *testing.T) { + _, err := c.BroadcastEvidence(ctx, nil) + assert.Error(t, err) + }) + }) + }) } } -func TestNetInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - netinfo, err := nc.NetInfo(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, netinfo.Listening) - assert.Equal(t, 0, len(netinfo.Peers)) - } -} - -func TestDumpConsensusState(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.DumpConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - assert.Empty(t, cons.Peers) - } -} - -func TestConsensusState(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { 
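The Block subtest above is the clearest illustration of the subscription API in this file: subscribe with a query string, read typed events off the returned channel, and unsubscribe when done. In isolation, under the same assumptions (a running client and the types helpers imported by this file), the pattern is:

    func watchBlocks(ctx context.Context, c client.Client) error {
        const subscriber = "watch-blocks-demo"
        query := types.QueryForEvent(types.EventNewBlockValue).String()

        eventCh, err := c.Subscribe(ctx, subscriber, query)
        if err != nil {
            return err
        }
        defer func() { _ = c.UnsubscribeAll(ctx, subscriber) }()

        for i := 0; i < 3; i++ {
            select {
            case ev := <-eventCh:
                block, ok := ev.Data.(types.EventDataNewBlock)
                if !ok {
                    return fmt.Errorf("unexpected event payload %T", ev.Data)
                }
                // Events arrive in order, one per committed block.
                _ = block.Block.Header.Height
            case <-ctx.Done():
                return ctx.Err()
            }
        }
        return nil
    }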
- // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.ConsensusState(ctx) - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - } -} - -func TestHealth(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - _, err := nc.Health(ctx) - require.Nil(t, err, "%d: %+v", i, err) - } -} - -func TestGenesisAndValidators(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - for i, c := range GetClients(t, n, conf) { - - // make sure this is the right genesis file - gen, err := c.Genesis(ctx) - require.Nil(t, err, "%d: %+v", i, err) - // get the genesis validator - require.Equal(t, 1, len(gen.Genesis.Validators)) - gval := gen.Genesis.Validators[0] - - // get the current validators - h := int64(1) - vals, err := c.Validators(ctx, &h, nil, nil) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - require.Equal(t, 1, vals.Count) - require.Equal(t, 1, vals.Total) - val := vals.Validators[0] - - // make sure the current set is also the genesis set - assert.Equal(t, gval.Power, val.VotingPower) - assert.Equal(t, gval.PubKey, val.PubKey) - } -} - -func TestGenesisChunked(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for _, c := range GetClients(t, n, conf) { - first, err := c.GenesisChunked(ctx, 0) - require.NoError(t, err) - - decoded := make([]string, 0, first.TotalChunks) - for i := 0; i < first.TotalChunks; i++ { - chunk, err := c.GenesisChunked(ctx, uint(i)) - require.NoError(t, err) - data, err := base64.StdEncoding.DecodeString(chunk.Data) - require.NoError(t, err) - decoded = append(decoded, string(data)) - - } - doc := []byte(strings.Join(decoded, "")) - - var out types.GenesisDoc - require.NoError(t, tmjson.Unmarshal(doc, &out), - "first: %+v, doc: %s", first, string(doc)) - } -} - -func TestABCIQuery(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - apph := bres.Height + 1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - res, err := c.ABCIQuery(ctx, "/key", k) - qres := res.Response - if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { - assert.EqualValues(t, v, qres.Value) - } - } -} - -// Make some app checks -func TestAppCalls(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - - // get an offset of height to avoid racing and guessing - s, err := c.Status(ctx) - require.NoError(t, err) - // sh is start height or status height - sh := s.SyncInfo.LatestBlockHeight - - // look for the future - h := sh + 20 - _, err = c.Block(ctx, &h) - require.Error(t, err) // no block yet - - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - require.True(t, bres.DeliverTx.IsOK()) - txh := bres.Height - apph := txh + 
1 // this is where the tx will be applied to the state - - // wait before querying - err = client.WaitForHeight(c, apph, nil) - require.NoError(t, err) - - _qres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: false}) - require.NoError(t, err) - qres := _qres.Response - if assert.True(t, qres.IsOK()) { - assert.Equal(t, k, qres.Key) - assert.EqualValues(t, v, qres.Value) - } - - // make sure we can lookup the tx with proof - ptx, err := c.Tx(ctx, bres.Hash, true) - require.NoError(t, err) - assert.EqualValues(t, txh, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - - // and we can even check the block is added - block, err := c.Block(ctx, &apph) - require.NoError(t, err) - appHash := block.Block.Header.AppHash - assert.True(t, len(appHash) > 0) - assert.EqualValues(t, apph, block.Block.Header.Height) - - blockByHash, err := c.BlockByHash(ctx, block.BlockID.Hash) - require.NoError(t, err) - require.Equal(t, block, blockByHash) - - // now check the results - blockResults, err := c.BlockResults(ctx, &txh) - require.NoError(t, err, "%d: %+v", i, err) - assert.Equal(t, txh, blockResults.Height) - if assert.Equal(t, 1, len(blockResults.TxsResults)) { - // check success code - assert.EqualValues(t, 0, blockResults.TxsResults[0].Code) - } - - // check blockchain info, now that we know there is info - info, err := c.BlockchainInfo(ctx, apph, apph) - require.NoError(t, err) - assert.True(t, info.LastHeight >= apph) - if assert.Equal(t, 1, len(info.BlockMetas)) { - lastMeta := info.BlockMetas[0] - assert.EqualValues(t, apph, lastMeta.Header.Height) - blockData := block.Block - assert.Equal(t, blockData.Header.AppHash, lastMeta.Header.AppHash) - assert.Equal(t, block.BlockID, lastMeta.BlockID) - } - - // and get the corresponding commit with the same apphash - commit, err := c.Commit(ctx, &apph) - require.NoError(t, err) - cappHash := commit.Header.AppHash - assert.Equal(t, appHash, cappHash) - assert.NotNil(t, commit.Commit) - - // compare the commits (note Commit(2) has commit from Block(3)) - h = apph - 1 - commit2, err := c.Commit(ctx, &h) - require.NoError(t, err) - assert.Equal(t, block.Block.LastCommitHash, commit2.Commit.Hash()) - - // and we got a proof that works! 
- _pres, err := c.ABCIQueryWithOptions(ctx, "/key", k, client.ABCIQueryOptions{Prove: true}) - require.NoError(t, err) - pres := _pres.Response - assert.True(t, pres.IsOK()) - - // XXX Test proof - } -} - -func TestBlockchainInfo(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - - for i, c := range GetClients(t, n, conf) { - err := client.WaitForHeight(c, 10, nil) - require.NoError(t, err) - - res, err := c.BlockchainInfo(ctx, 0, 0) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) > 0) - - res, err = c.BlockchainInfo(ctx, 1, 1) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) == 1) - - res, err = c.BlockchainInfo(ctx, 1, 10000) - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, res.LastHeight > 0) - assert.True(t, len(res.BlockMetas) < 100) - for _, m := range res.BlockMetas { - assert.NotNil(t, m) - } - - res, err = c.BlockchainInfo(ctx, 10000, 1) - require.NotNil(t, err) - assert.Nil(t, res) - assert.Contains(t, err.Error(), "can't be greater than max") - } -} - -func TestBroadcastTxSync(t *testing.T) { - n, conf := NodeSuite(t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // TODO (melekes): use mempool which is set on RPC rather than getting it from node - mempool := getMempool(t, n) - initMempoolSize := mempool.Size() - - for i, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxSync(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, bres.Code, abci.CodeTypeOK) // FIXME - - require.Equal(t, initMempoolSize+1, mempool.Size()) - - txs := mempool.ReapMaxTxs(len(tx)) - require.EqualValues(t, tx, txs[0]) - mempool.Flush() - } -} - -func getMempool(t *testing.T, srv service.Service) mempl.Mempool { +func getMempool(t *testing.T, srv service.Service) mempool.Mempool { t.Helper() n, ok := srv.(interface { - Mempool() mempl.Mempool + Mempool() mempool.Mempool }) require.True(t, ok) return n.Mempool() } -func TestBroadcastTxCommit(t *testing.T) { +// these cases are roughly the same as TestClientMethodCalls, but +// they have to loop over their clients inside the individual test +// cases, so a separate suite makes more sense, even though that isn't +// strictly desirable. +func TestClientMethodCallsAdvanced(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() n, conf := NodeSuite(t) + pool := getMempool(t, n) - mempool := getMempool(t, n) - for i, c := range GetClients(t, n, conf) { + t.Run("UnconfirmedTxs", func(t *testing.T) { + _, _, tx := MakeTxKV() + ch := make(chan struct{}) + + err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{}) + require.NoError(t, err) + + // wait for tx to arrive in mempool. 
+ select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + + for _, c := range GetClients(t, n, conf) { + mc := c.(client.MempoolClient) + limit := 1 + res, err := mc.UnconfirmedTxs(ctx, &limit) + require.NoError(t, err) + + assert.Equal(t, 1, res.Count) + assert.Equal(t, 1, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) + } + + pool.Flush() + }) + t.Run("NumUnconfirmedTxs", func(t *testing.T) { + ch := make(chan struct{}) + + pool := getMempool(t, n) + + _, _, tx := MakeTxKV() + + err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{}) + require.NoError(t, err) + + // wait for tx to arrive in mempoool. + select { + case <-ch: + case <-time.After(5 * time.Second): + t.Error("Timed out waiting for CheckTx callback") + } + + mempoolSize := pool.Size() + for i, c := range GetClients(t, n, conf) { + mc, ok := c.(client.MempoolClient) + require.True(t, ok, "%d", i) + res, err := mc.NumUnconfirmedTxs(ctx) + require.Nil(t, err, "%d: %+v", i, err) + + assert.Equal(t, mempoolSize, res.Count) + assert.Equal(t, mempoolSize, res.Total) + assert.Equal(t, pool.SizeBytes(), res.TotalBytes) + } + + pool.Flush() + }) + t.Run("Tx", func(t *testing.T) { + c := getHTTPClient(t, conf) + + // first we broadcast a tx _, _, tx := MakeTxKV() bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%d: %+v", i, err) - require.True(t, bres.CheckTx.IsOK()) - require.True(t, bres.DeliverTx.IsOK()) + require.Nil(t, err, "%+v", err) - require.Equal(t, 0, mempool.Size()) - } -} + txHeight := bres.Height + txHash := bres.Hash -func TestUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + anotherTxHash := types.Tx("a different tx").Hash() - _, _, tx := MakeTxKV() - ch := make(chan *abci.Response, 1) + cases := []struct { + valid bool + prove bool + hash []byte + }{ + // only valid if correct hash provided + {true, false, txHash}, + {true, true, txHash}, + {false, false, anotherTxHash}, + {false, true, anotherTxHash}, + {false, false, nil}, + {false, true, nil}, + } - n, conf := NodeSuite(t) - mempool := getMempool(t, n) - err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{}) + for _, c := range GetClients(t, n, conf) { + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + for j, tc := range cases { + t.Run(fmt.Sprintf("Case%d", j), func(t *testing.T) { + // now we query for the tx. + // since there's only one tx, we know index=0. + ptx, err := c.Tx(ctx, tc.hash, tc.prove) - require.NoError(t, err) + if !tc.valid { + require.NotNil(t, err) + } else { + require.Nil(t, err, "%+v", err) + assert.EqualValues(t, txHeight, ptx.Height) + assert.EqualValues(t, tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, txHash, ptx.Hash) - // wait for tx to arrive in mempoool. 
- select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } - - for _, c := range GetClients(t, n, conf) { - mc := c.(client.MempoolClient) - limit := 1 - res, err := mc.UnconfirmedTxs(ctx, &limit) - require.NoError(t, err) - - assert.Equal(t, 1, res.Count) - assert.Equal(t, 1, res.Total) - assert.Equal(t, mempool.SizeBytes(), res.TotalBytes) - assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) - } - - mempool.Flush() -} - -func TestNumUnconfirmedTxs(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, _, tx := MakeTxKV() - - n, conf := NodeSuite(t) - ch := make(chan *abci.Response, 1) - mempool := getMempool(t, n) - - err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{}) - require.NoError(t, err) - - // wait for tx to arrive in mempoool. - select { - case <-ch: - case <-time.After(5 * time.Second): - t.Error("Timed out waiting for CheckTx callback") - } - - mempoolSize := mempool.Size() - for i, c := range GetClients(t, n, conf) { - mc, ok := c.(client.MempoolClient) - require.True(t, ok, "%d", i) - res, err := mc.NumUnconfirmedTxs(ctx) - require.Nil(t, err, "%d: %+v", i, err) - - assert.Equal(t, mempoolSize, res.Count) - assert.Equal(t, mempoolSize, res.Total) - assert.Equal(t, mempool.SizeBytes(), res.TotalBytes) - } - - mempool.Flush() -} - -func TestCheckTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - n, conf := NodeSuite(t) - mempool := getMempool(t, n) - - for _, c := range GetClients(t, n, conf) { - _, _, tx := MakeTxKV() - - res, err := c.CheckTx(ctx, tx) - require.NoError(t, err) - assert.Equal(t, abci.CodeTypeOK, res.Code) - - assert.Equal(t, 0, mempool.Size(), "mempool must be empty") - } -} - -func TestTx(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - n, conf := NodeSuite(t) - - c := getHTTPClient(t, conf) - - // first we broadcast a tx - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(ctx, tx) - require.Nil(t, err, "%+v", err) - - txHeight := bres.Height - txHash := bres.Hash - - anotherTxHash := types.Tx("a different tx").Hash() - - cases := []struct { - valid bool - prove bool - hash []byte - }{ - // only valid if correct hash provided - {true, false, txHash}, - {true, true, txHash}, - {false, false, anotherTxHash}, - {false, true, anotherTxHash}, - {false, false, nil}, - {false, true, nil}, - } - - for i, c := range GetClients(t, n, conf) { - for j, tc := range cases { - t.Logf("client %d, case %d", i, j) - - // now we query for the tx. - // since there's only one tx, we know index=0. 
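Both mempool subtests added above lean on the same synchronization idiom, because Mempool.CheckTx reports its result through a callback rather than a return value: close a channel inside the callback, then select on the channel with a timeout. Factored out, with a hypothetical helper name and the five-second timeout taken from the tests:

    func waitForCheckTx(ctx context.Context, pool mempool.Mempool, tx types.Tx) error {
        ch := make(chan struct{})
        err := pool.CheckTx(ctx, tx, func(_ *abci.Response) { close(ch) }, mempool.TxInfo{})
        if err != nil {
            return err
        }
        select {
        case <-ch:
            return nil // the tx has been checked and sits in the mempool
        case <-time.After(5 * time.Second):
            return fmt.Errorf("timed out waiting for CheckTx callback")
        }
    }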
- ptx, err := c.Tx(ctx, tc.hash, tc.prove) - - if !tc.valid { - require.NotNil(t, err) - } else { - require.Nil(t, err, "%+v", err) - assert.EqualValues(t, txHeight, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, txHash, ptx.Hash) - - // time to verify the proof - proof := ptx.Proof - if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + // time to verify the proof + proof := ptx.Proof + if tc.prove && assert.EqualValues(t, tx, proof.Data) { + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) + } + } + }) } - } + }) } - } -} + }) + t.Run("TxSearchWithTimeout", func(t *testing.T) { + timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) -func TestTxSearchWithTimeout(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - timeoutClient := getHTTPClientWithTimeout(t, conf, 10*time.Second) - - _, _, tx := MakeTxKV() - _, err := timeoutClient.BroadcastTxCommit(ctx, tx) - require.NoError(t, err) - - // query using a compositeKey (see kvstore application) - result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") -} - -func TestTxSearch(t *testing.T) { - n, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - - // first we broadcast a few txs - for i := 0; i < 10; i++ { _, _, tx := MakeTxKV() - _, err := c.BroadcastTxCommit(context.Background(), tx) + _, err := timeoutClient.BroadcastTxCommit(ctx, tx) require.NoError(t, err) - } - - // since we're not using an isolated test server, we'll have lingering transactions - // from other tests as well - result, err := c.TxSearch(context.Background(), "tx.height >= 0", true, nil, nil, "asc") - require.NoError(t, err) - txCount := len(result.Txs) - - // pick out the last tx to have something to search for in tests - find := result.Txs[len(result.Txs)-1] - anotherTxHash := types.Tx("a different tx").Hash() - - for i, c := range GetClients(t, n, conf) { - t.Logf("client %d", i) - - // now we query for the tx. 
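The prove branch exercised above is worth reading on its own: when prove is true, the Tx result carries a Merkle proof binding the transaction to the block's data root, and Verify checks that binding against the tx hash. Reduced to the essentials, and assuming the same client interface used throughout this file:

    func verifyTxInclusion(ctx context.Context, c client.Client, txHash []byte) error {
        ptx, err := c.Tx(ctx, txHash, true)
        if err != nil {
            return err
        }
        if !bytes.Equal(ptx.Proof.Data, ptx.Tx) {
            return fmt.Errorf("proof data does not match the returned tx")
        }
        // Verify succeeds only if the tx really is included under RootHash.
        return ptx.Proof.Proof.Verify(ptx.Proof.RootHash, txHash)
    }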
- result, err := c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - require.Equal(t, find.Hash, result.Txs[0].Hash) - - ptx := result.Txs[0] - assert.EqualValues(t, find.Height, ptx.Height) - assert.EqualValues(t, find.Tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, find.Hash, ptx.Hash) - - // time to verify the proof - if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { - assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) - } - - // query by height - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 1) - - // query for non existing tx - result, err = c.TxSearch(context.Background(), fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) // query using a compositeKey (see kvstore application) - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + result, err := timeoutClient.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") require.Nil(t, err) require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + }) + t.Run("TxSearch", func(t *testing.T) { + t.Skip("Test Asserts Non-Deterministic Results") + c := getHTTPClient(t, conf) - // query using an index key - result, err = c.TxSearch(context.Background(), "app.index_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using an noindex key - result, err = c.TxSearch(context.Background(), "app.noindex_key='index is working'", false, nil, nil, "asc") - require.Nil(t, err) - require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") - - // query using a compositeKey (see kvstore application) and height - result, err = c.TxSearch(context.Background(), - "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") - require.Nil(t, err) - require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") - - // query a non existing tx with page 1 and txsPerPage 1 - perPage := 1 - result, err = c.TxSearch(context.Background(), "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") - require.Nil(t, err) - require.Len(t, result.Txs, 0) - - // check sorting - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "asc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } - - result, err = c.TxSearch(context.Background(), "tx.height >= 1", false, nil, nil, "desc") - require.Nil(t, err) - for k := 0; k < len(result.Txs)-1; k++ { - require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) - require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) - } - // check pagination - perPage = 3 - var ( - seen = map[int64]bool{} - maxHeight int64 - pages = int(math.Ceil(float64(txCount) / float64(perPage))) - ) - - for page := 1; page <= pages; page++ { - page := page - result, err := c.TxSearch(context.Background(), "tx.height >= 1", false, &page, &perPage, "asc") + // first we broadcast a few txs + for i := 0; i < 10; i++ { + _, _, tx := MakeTxKV() + _, err := 
c.BroadcastTxSync(ctx, tx) require.NoError(t, err) - if page < pages { - require.Len(t, result.Txs, perPage) - } else { - require.LessOrEqual(t, len(result.Txs), perPage) - } - require.Equal(t, txCount, result.TotalCount) - for _, tx := range result.Txs { - require.False(t, seen[tx.Height], - "Found duplicate height %v in page %v", tx.Height, page) - require.Greater(t, tx.Height, maxHeight, - "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) - seen[tx.Height] = true - maxHeight = tx.Height - } } - require.Len(t, seen, txCount) - } -} -func TestBatchedJSONRPCCalls(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + // since we're not using an isolated test server, we'll have lingering transactions + // from other tests as well + result, err := c.TxSearch(ctx, "tx.height >= 0", true, nil, nil, "asc") + require.NoError(t, err) + txCount := len(result.Txs) - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - testBatchedJSONRPCCalls(ctx, t, c) + // pick out the last tx to have something to search for in tests + find := result.Txs[len(result.Txs)-1] + anotherTxHash := types.Tx("a different tx").Hash() + + for _, c := range GetClients(t, n, conf) { + t.Run(fmt.Sprintf("%T", c), func(t *testing.T) { + + // now we query for the tx. + result, err := c.TxSearch(ctx, fmt.Sprintf("tx.hash='%v'", find.Hash), true, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 1) + require.Equal(t, find.Hash, result.Txs[0].Hash) + + ptx := result.Txs[0] + assert.EqualValues(t, find.Height, ptx.Height) + assert.EqualValues(t, find.Tx, ptx.Tx) + assert.Zero(t, ptx.Index) + assert.True(t, ptx.TxResult.IsOK()) + assert.EqualValues(t, find.Hash, ptx.Hash) + + // time to verify the proof + if assert.EqualValues(t, find.Tx, ptx.Proof.Data) { + assert.NoError(t, ptx.Proof.Proof.Verify(ptx.Proof.RootHash, find.Hash)) + } + + // query by height + result, err = c.TxSearch(ctx, fmt.Sprintf("tx.height=%d", find.Height), true, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 1) + + // query for non existing tx + result, err = c.TxSearch(ctx, fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, nil, nil, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 0) + + // query using a compositeKey (see kvstore application) + result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Netowoko'", false, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using an index key + result, err = c.TxSearch(ctx, "app.index_key='index is working'", false, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using an noindex key + result, err = c.TxSearch(ctx, "app.noindex_key='index is working'", false, nil, nil, "asc") + require.Nil(t, err) + require.Equal(t, len(result.Txs), 0, "expected a lot of transactions") + + // query using a compositeKey (see kvstore application) and height + result, err = c.TxSearch(ctx, + "app.creator='Cosmoshi Netowoko' AND tx.height<10000", true, nil, nil, "asc") + require.Nil(t, err) + require.Greater(t, len(result.Txs), 0, "expected a lot of transactions") + + // query a non existing tx with page 1 and txsPerPage 1 + perPage := 1 + result, err = c.TxSearch(ctx, "app.creator='Cosmoshi Neetowoko'", true, nil, &perPage, "asc") + require.Nil(t, err) + require.Len(t, result.Txs, 0) + + // check sorting + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, 
nil, "asc") + require.Nil(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.LessOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.LessOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + + result, err = c.TxSearch(ctx, "tx.height >= 1", false, nil, nil, "desc") + require.Nil(t, err) + for k := 0; k < len(result.Txs)-1; k++ { + require.GreaterOrEqual(t, result.Txs[k].Height, result.Txs[k+1].Height) + require.GreaterOrEqual(t, result.Txs[k].Index, result.Txs[k+1].Index) + } + // check pagination + perPage = 3 + var ( + seen = map[int64]bool{} + maxHeight int64 + pages = int(math.Ceil(float64(txCount) / float64(perPage))) + ) + + for page := 1; page <= pages; page++ { + page := page + result, err := c.TxSearch(ctx, "tx.height >= 1", false, &page, &perPage, "asc") + require.NoError(t, err) + if page < pages { + require.Len(t, result.Txs, perPage) + } else { + require.LessOrEqual(t, len(result.Txs), perPage) + } + require.Equal(t, txCount, result.TotalCount) + for _, tx := range result.Txs { + require.False(t, seen[tx.Height], + "Found duplicate height %v in page %v", tx.Height, page) + require.Greater(t, tx.Height, maxHeight, + "Found decreasing height %v (max seen %v) in page %v", tx.Height, maxHeight, page) + seen[tx.Height] = true + maxHeight = tx.Height + } + } + require.Len(t, seen, txCount) + }) + } + }) } func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) { @@ -781,10 +850,10 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Len(t, bresults, 2) require.Equal(t, 0, batch.Count()) - bresult1, ok := bresults[0].(*ctypes.ResultBroadcastTxCommit) + bresult1, ok := bresults[0].(*coretypes.ResultBroadcastTxCommit) require.True(t, ok) require.Equal(t, *bresult1, *r1) - bresult2, ok := bresults[1].(*ctypes.ResultBroadcastTxCommit) + bresult2, ok := bresults[1].(*coretypes.ResultBroadcastTxCommit) require.True(t, ok) require.Equal(t, *bresult2, *r2) apph := tmmath.MaxInt64(bresult1.Height, bresult2.Height) + 1 @@ -802,10 +871,10 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Len(t, qresults, 2) require.Equal(t, 0, batch.Count()) - qresult1, ok := qresults[0].(*ctypes.ResultABCIQuery) + qresult1, ok := qresults[0].(*coretypes.ResultABCIQuery) require.True(t, ok) require.Equal(t, *qresult1, *q1) - qresult2, ok := qresults[1].(*ctypes.ResultABCIQuery) + qresult2, ok := qresults[1].(*coretypes.ResultABCIQuery) require.True(t, ok) require.Equal(t, *qresult2, *q2) @@ -814,60 +883,3 @@ func testBatchedJSONRPCCalls(ctx context.Context, t *testing.T, c *rpchttp.HTTP) require.Equal(t, qresult1.Response.Value, v1) require.Equal(t, qresult2.Response.Value, v2) } - -func TestBatchedJSONRPCCallsCancellation(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - _, _, tx1 := MakeTxKV() - _, _, tx2 := MakeTxKV() - - batch := c.NewBatch() - _, err := batch.BroadcastTxCommit(ctx, tx1) - require.NoError(t, err) - _, err = batch.BroadcastTxCommit(ctx, tx2) - require.NoError(t, err) - // we should have 2 requests waiting - require.Equal(t, 2, batch.Count()) - // we want to make sure we cleared 2 pending requests - require.Equal(t, 2, batch.Clear()) - // now there should be no batched requests - require.Equal(t, 0, batch.Count()) -} - -func TestSendingEmptyRequestBatch(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := 
NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - _, err := batch.Send(ctx) - require.Error(t, err, "sending an empty batch of JSON RPC requests should result in an error") -} - -func TestClearingEmptyRequestBatch(t *testing.T) { - _, conf := NodeSuite(t) - c := getHTTPClient(t, conf) - batch := c.NewBatch() - require.Zero(t, batch.Clear(), "clearing an empty batch of JSON RPC requests should result in a 0 result") -} - -func TestConcurrentJSONRPCBatching(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - _, conf := NodeSuite(t) - var wg sync.WaitGroup - c := getHTTPClient(t, conf) - for i := 0; i < 50; i++ { - wg.Add(1) - go func() { - defer wg.Done() - testBatchedJSONRPCCalls(ctx, t, c) - }() - } - wg.Wait() -} diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go deleted file mode 100644 index 690b0a295..000000000 --- a/rpc/core/mempool.go +++ /dev/null @@ -1,180 +0,0 @@ -package core - -import ( - "context" - "errors" - "fmt" - "time" - - abci "github.com/tendermint/tendermint/abci/types" - mempl "github.com/tendermint/tendermint/internal/mempool" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" -) - -//----------------------------------------------------------------------------- -// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!) - -// BroadcastTxAsync returns right away, with no response. Does not wait for -// CheckTx nor DeliverTx results. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_async -func (env *Environment) BroadcastTxAsync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - err := env.Mempool.CheckTx(ctx.Context(), tx, nil, mempl.TxInfo{}) - if err != nil { - return nil, err - } - - return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil -} - -// BroadcastTxSync returns with the response from CheckTx. Does not wait for -// DeliverTx result. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_sync -func (env *Environment) BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - resCh := make(chan *abci.Response, 1) - err := env.Mempool.CheckTx( - ctx.Context(), - tx, - func(res *abci.Response) { resCh <- res }, - mempl.TxInfo{}, - ) - if err != nil { - return nil, err - } - - res := <-resCh - r := res.GetCheckTx() - - return &ctypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Log: r.Log, - Codespace: r.Codespace, - MempoolError: r.MempoolError, - Hash: tx.Hash(), - }, nil -} - -// BroadcastTxCommit returns with the responses from CheckTx and DeliverTx. -// More: https://docs.tendermint.com/master/rpc/#/Tx/broadcast_tx_commit -func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - subscriber := ctx.RemoteAddr() - - if env.EventBus.NumClients() >= env.Config.MaxSubscriptionClients { - return nil, fmt.Errorf("max_subscription_clients %d reached", env.Config.MaxSubscriptionClients) - } else if env.EventBus.NumClientSubscriptions(subscriber) >= env.Config.MaxSubscriptionsPerClient { - return nil, fmt.Errorf("max_subscriptions_per_client %d reached", env.Config.MaxSubscriptionsPerClient) - } - - // Subscribe to tx being committed in block. 
- subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) - defer cancel() - q := types.EventQueryTxFor(tx) - deliverTxSub, err := env.EventBus.Subscribe(subCtx, subscriber, q) - if err != nil { - err = fmt.Errorf("failed to subscribe to tx: %w", err) - env.Logger.Error("Error on broadcast_tx_commit", "err", err) - return nil, err - } - defer func() { - args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: q} - if err := env.EventBus.Unsubscribe(context.Background(), args); err != nil { - env.Logger.Error("Error unsubscribing from eventBus", "err", err) - } - }() - - // Broadcast tx and wait for CheckTx result - checkTxResCh := make(chan *abci.Response, 1) - err = env.Mempool.CheckTx( - ctx.Context(), - tx, - func(res *abci.Response) { checkTxResCh <- res }, - mempl.TxInfo{}, - ) - if err != nil { - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return nil, fmt.Errorf("error on broadcastTxCommit: %v", err) - } - - checkTxResMsg := <-checkTxResCh - checkTxRes := checkTxResMsg.GetCheckTx() - - if checkTxRes.Code != abci.CodeTypeOK { - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, nil - } - - // Wait for the tx to be included in a block or timeout. - select { - case msg := <-deliverTxSub.Out(): // The tx was included in a block. - deliverTxRes := msg.Data().(types.EventDataTx) - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: deliverTxRes.Result, - Hash: tx.Hash(), - Height: deliverTxRes.Height, - }, nil - case <-deliverTxSub.Canceled(): - var reason string - if deliverTxSub.Err() == nil { - reason = "Tendermint exited" - } else { - reason = deliverTxSub.Err().Error() - } - err = fmt.Errorf("deliverTxSub was canceled (reason: %s)", reason) - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, err - case <-time.After(env.Config.TimeoutBroadcastTxCommit): - err = errors.New("timed out waiting for tx to be included in a block") - env.Logger.Error("Error on broadcastTxCommit", "err", err) - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxRes, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, err - } -} - -// UnconfirmedTxs gets unconfirmed transactions (maximum ?limit entries) -// including their number. -// More: https://docs.tendermint.com/master/rpc/#/Info/unconfirmed_txs -func (env *Environment) UnconfirmedTxs(ctx *rpctypes.Context, limitPtr *int) (*ctypes.ResultUnconfirmedTxs, error) { - // reuse per_page validator - limit := env.validatePerPage(limitPtr) - - txs := env.Mempool.ReapMaxTxs(limit) - return &ctypes.ResultUnconfirmedTxs{ - Count: len(txs), - Total: env.Mempool.Size(), - TotalBytes: env.Mempool.SizeBytes(), - Txs: txs}, nil -} - -// NumUnconfirmedTxs gets number of unconfirmed transactions. -// More: https://docs.tendermint.com/master/rpc/#/Info/num_unconfirmed_txs -func (env *Environment) NumUnconfirmedTxs(ctx *rpctypes.Context) (*ctypes.ResultUnconfirmedTxs, error) { - return &ctypes.ResultUnconfirmedTxs{ - Count: env.Mempool.Size(), - Total: env.Mempool.Size(), - TotalBytes: env.Mempool.SizeBytes()}, nil -} - -// CheckTx checks the transaction without executing it. The transaction won't -// be added to the mempool either. 
-// More: https://docs.tendermint.com/master/rpc/#/Tx/check_tx -func (env *Environment) CheckTx(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { - res, err := env.ProxyAppMempool.CheckTxSync(ctx.Context(), abci.RequestCheckTx{Tx: tx}) - if err != nil { - return nil, err - } - return &ctypes.ResultCheckTx{ResponseCheckTx: *res}, nil -} diff --git a/rpc/core/net.go b/rpc/core/net.go deleted file mode 100644 index edcf8fffa..000000000 --- a/rpc/core/net.go +++ /dev/null @@ -1,138 +0,0 @@ -package core - -import ( - "errors" - "fmt" - "strings" - - "github.com/tendermint/tendermint/internal/p2p" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -// NetInfo returns network info. -// More: https://docs.tendermint.com/master/rpc/#/Info/net_info -func (env *Environment) NetInfo(ctx *rpctypes.Context) (*ctypes.ResultNetInfo, error) { - peersList := env.P2PPeers.Peers().List() - peers := make([]ctypes.Peer, 0, len(peersList)) - for _, peer := range peersList { - peers = append(peers, ctypes.Peer{ - NodeInfo: peer.NodeInfo(), - IsOutbound: peer.IsOutbound(), - ConnectionStatus: peer.Status(), - RemoteIP: peer.RemoteIP().String(), - }) - } - // TODO: Should we include PersistentPeers and Seeds in here? - // PRO: useful info - // CON: privacy - return &ctypes.ResultNetInfo{ - Listening: env.P2PTransport.IsListening(), - Listeners: env.P2PTransport.Listeners(), - NPeers: len(peers), - Peers: peers, - }, nil -} - -// UnsafeDialSeeds dials the given seeds (comma-separated id@IP:PORT). -func (env *Environment) UnsafeDialSeeds(ctx *rpctypes.Context, seeds []string) (*ctypes.ResultDialSeeds, error) { - if len(seeds) == 0 { - return &ctypes.ResultDialSeeds{}, fmt.Errorf("%w: no seeds provided", ctypes.ErrInvalidRequest) - } - env.Logger.Info("DialSeeds", "seeds", seeds) - if err := env.P2PPeers.DialPeersAsync(seeds); err != nil { - return &ctypes.ResultDialSeeds{}, err - } - return &ctypes.ResultDialSeeds{Log: "Dialing seeds in progress. See /net_info for details"}, nil -} - -// UnsafeDialPeers dials the given peers (comma-separated id@IP:PORT), -// optionally making them persistent. -func (env *Environment) UnsafeDialPeers( - ctx *rpctypes.Context, - peers []string, - persistent, unconditional, private bool) (*ctypes.ResultDialPeers, error) { - - if len(peers) == 0 { - return &ctypes.ResultDialPeers{}, fmt.Errorf("%w: no peers provided", ctypes.ErrInvalidRequest) - } - - ids, err := getIDs(peers) - if err != nil { - return &ctypes.ResultDialPeers{}, err - } - - env.Logger.Info("DialPeers", "peers", peers, "persistent", - persistent, "unconditional", unconditional, "private", private) - - if persistent { - if err := env.P2PPeers.AddPersistentPeers(peers); err != nil { - return &ctypes.ResultDialPeers{}, err - } - } - - if private { - if err := env.P2PPeers.AddPrivatePeerIDs(ids); err != nil { - return &ctypes.ResultDialPeers{}, err - } - } - - if unconditional { - if err := env.P2PPeers.AddUnconditionalPeerIDs(ids); err != nil { - return &ctypes.ResultDialPeers{}, err - } - } - - if err := env.P2PPeers.DialPeersAsync(peers); err != nil { - return &ctypes.ResultDialPeers{}, err - } - - return &ctypes.ResultDialPeers{Log: "Dialing peers in progress. See /net_info for details"}, nil -} - -// Genesis returns genesis file. 
-// More: https://docs.tendermint.com/master/rpc/#/Info/genesis -func (env *Environment) Genesis(ctx *rpctypes.Context) (*ctypes.ResultGenesis, error) { - if len(env.genChunks) > 1 { - return nil, errors.New("genesis response is large, please use the genesis_chunked API instead") - } - - return &ctypes.ResultGenesis{Genesis: env.GenDoc}, nil -} - -func (env *Environment) GenesisChunked(ctx *rpctypes.Context, chunk uint) (*ctypes.ResultGenesisChunk, error) { - if env.genChunks == nil { - return nil, fmt.Errorf("service configuration error, genesis chunks are not initialized") - } - - if len(env.genChunks) == 0 { - return nil, fmt.Errorf("service configuration error, there are no chunks") - } - - id := int(chunk) - - if id > len(env.genChunks)-1 { - return nil, fmt.Errorf("there are %d chunks, %d is invalid", len(env.genChunks)-1, id) - } - - return &ctypes.ResultGenesisChunk{ - TotalChunks: len(env.genChunks), - ChunkNumber: id, - Data: env.genChunks[id], - }, nil -} - -func getIDs(peers []string) ([]string, error) { - ids := make([]string, 0, len(peers)) - - for _, peer := range peers { - - spl := strings.Split(peer, "@") - if len(spl) != 2 { - return nil, p2p.ErrNetAddressNoID{Addr: peer} - } - ids = append(ids, spl[0]) - - } - return ids, nil -} diff --git a/rpc/core/net_test.go b/rpc/core/net_test.go deleted file mode 100644 index 821cdb663..000000000 --- a/rpc/core/net_test.go +++ /dev/null @@ -1,89 +0,0 @@ -package core - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/libs/log" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -func TestUnsafeDialSeeds(t *testing.T) { - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - seeds []string - isErr bool - }{ - {[]string{}, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, false}, - {[]string{"127.0.0.1:41198"}, true}, - } - - for _, tc := range testCases { - res, err := env.UnsafeDialSeeds(&rpctypes.Context{}, tc.seeds) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} - -func TestUnsafeDialPeers(t *testing.T) { - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", - func(n int, sw *p2p.Switch) *p2p.Switch { return sw }, log.TestingLogger()) - sw.SetAddrBook(&p2p.AddrBookMock{ - Addrs: make(map[string]struct{}), - OurAddrs: make(map[string]struct{}), - PrivateAddrs: make(map[string]struct{}), - }) - err := sw.Start() - require.NoError(t, err) - t.Cleanup(func() { - if err := sw.Stop(); err != nil { - t.Error(err) - } - }) - - env := &Environment{} - env.Logger = log.TestingLogger() - env.P2PPeers = sw - - testCases := []struct { - peers []string - persistence, unconditional, private bool - isErr bool - }{ - {[]string{}, false, false, false, true}, - {[]string{"d51fb70907db1c6c2d5237e78379b25cf1a37ab4@127.0.0.1:41198"}, true, true, true, false}, - {[]string{"127.0.0.1:41198"}, true, true, false, true}, - } - - for _, tc := range testCases { - res, err := 
env.UnsafeDialPeers(&rpctypes.Context{}, tc.peers, tc.persistence, tc.unconditional, tc.private) - if tc.isErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - assert.NotNil(t, res) - } - } -} diff --git a/rpc/core/types/responses.go b/rpc/coretypes/responses.go similarity index 93% rename from rpc/core/types/responses.go rename to rpc/coretypes/responses.go index a49e3c0d9..ecb058312 100644 --- a/rpc/core/types/responses.go +++ b/rpc/coretypes/responses.go @@ -7,7 +7,6 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" - "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/types" @@ -101,6 +100,14 @@ type SyncInfo struct { TotalSyncedTime time.Duration `json:"total_synced_time"` RemainingTime time.Duration `json:"remaining_time"` + + TotalSnapshots int64 `json:"total_snapshots"` + ChunkProcessAvgTime time.Duration `json:"chunk_process_avg_time"` + SnapshotHeight int64 `json:"snapshot_height"` + SnapshotChunksCount int64 `json:"snapshot_chunks_count"` + SnapshotChunksTotal int64 `json:"snapshot_chunks_total"` + BackFilledBlocks int64 `json:"backfilled_blocks"` + BackFillBlocksTotal int64 `json:"backfill_blocks_total"` } // Info about the node's validator @@ -145,10 +152,8 @@ type ResultDialPeers struct { // A peer type Peer struct { - NodeInfo types.NodeInfo `json:"node_info"` - IsOutbound bool `json:"is_outbound"` - ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` - RemoteIP string `json:"remote_ip"` + ID types.NodeID `json:"node_id"` + URL string `json:"url"` } // Validators for a height. diff --git a/rpc/core/types/responses_test.go b/rpc/coretypes/responses_test.go similarity index 100% rename from rpc/core/types/responses_test.go rename to rpc/coretypes/responses_test.go diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go deleted file mode 100644 index 27f8c97e4..000000000 --- a/rpc/grpc/api.go +++ /dev/null @@ -1,41 +0,0 @@ -package coregrpc - -import ( - "context" - - abci "github.com/tendermint/tendermint/abci/types" - core "github.com/tendermint/tendermint/rpc/core" - rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" -) - -type broadcastAPI struct { - env *core.Environment -} - -func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - // kvstore so we can check if the server is up - return &ResponsePing{}, nil -} - -// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36. 
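// Client-side view of this deprecated endpoint, as an illustrative sketch
// (the dial address is made up; StartGRPCClient is defined in
// client_server.go below, and "key=value" is a tx the example kvstore app
// accepts):
//
//	client := coregrpc.StartGRPCClient("tcp://127.0.0.1:36656")
//	res, err := client.BroadcastTx(context.Background(),
//		&coregrpc.RequestBroadcastTx{Tx: []byte("key=value")})
//	if err == nil {
//		fmt.Println("CheckTx:", res.CheckTx.Code, "DeliverTx:", res.DeliverTx.Code)
//	}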
-func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - // NOTE: there's no way to get client's remote address - // see https://stackoverflow.com/questions/33684570/session-and-remote-ip-address-in-grpc-go - res, err := bapi.env.BroadcastTxCommit(&rpctypes.Context{}, req.Tx) - if err != nil { - return nil, err - } - - return &ResponseBroadcastTx{ - CheckTx: &abci.ResponseCheckTx{ - Code: res.CheckTx.Code, - Data: res.CheckTx.Data, - Log: res.CheckTx.Log, - }, - DeliverTx: &abci.ResponseDeliverTx{ - Code: res.DeliverTx.Code, - Data: res.DeliverTx.Data, - Log: res.DeliverTx.Log, - }, - }, nil -} diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go deleted file mode 100644 index 2fb0abb67..000000000 --- a/rpc/grpc/client_server.go +++ /dev/null @@ -1,40 +0,0 @@ -package coregrpc - -import ( - "context" - "net" - - "google.golang.org/grpc" - - tmnet "github.com/tendermint/tendermint/libs/net" - "github.com/tendermint/tendermint/rpc/core" -) - -// Config is an gRPC server configuration. -type Config struct { - MaxOpenConnections int -} - -// StartGRPCServer starts a new gRPC BroadcastAPIServer using the given -// net.Listener. -// NOTE: This function blocks - you may want to call it in a go-routine. -// Deprecated: gRPC in the RPC layer of Tendermint will be removed in 0.36 -func StartGRPCServer(env *core.Environment, ln net.Listener) error { - grpcServer := grpc.NewServer() - RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{env: env}) - return grpcServer.Serve(ln) -} - -// StartGRPCClient dials the gRPC server using protoAddr and returns a new -// BroadcastAPIClient. -func StartGRPCClient(protoAddr string) BroadcastAPIClient { - conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc)) - if err != nil { - panic(err) - } - return NewBroadcastAPIClient(conn) -} - -func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { - return tmnet.Connect(addr) -} diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go deleted file mode 100644 index 45deb6b76..000000000 --- a/rpc/grpc/grpc_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package coregrpc_test - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/libs/service" - core_grpc "github.com/tendermint/tendermint/rpc/grpc" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -func NodeSuite(t *testing.T) (service.Service, *config.Config) { - t.Helper() - - ctx, cancel := context.WithCancel(context.Background()) - - conf := rpctest.CreateConfig(t.Name()) - - // start a tendermint node in the background to test against - app := kvstore.NewApplication() - - node, closer, err := rpctest.StartTendermint(ctx, conf, app) - require.NoError(t, err) - t.Cleanup(func() { - _ = closer(ctx) - cancel() - }) - return node, conf -} - -func TestBroadcastTx(t *testing.T) { - _, conf := NodeSuite(t) - - res, err := rpctest.GetGRPCClient(conf).BroadcastTx( - context.Background(), - &core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")}, - ) - require.NoError(t, err) - require.EqualValues(t, 0, res.CheckTx.Code) - require.EqualValues(t, 0, res.DeliverTx.Code) -} diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go deleted file mode 100644 index b9cbee03f..000000000 --- a/rpc/grpc/types.pb.go +++ /dev/null @@ -1,924 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. 
-// source: tendermint/rpc/grpc/types.proto - -package coregrpc - -import ( - context "context" - fmt "fmt" - proto "github.com/gogo/protobuf/proto" - types "github.com/tendermint/tendermint/abci/types" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" - io "io" - math "math" - math_bits "math/bits" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -type RequestPing struct { -} - -func (m *RequestPing) Reset() { *m = RequestPing{} } -func (m *RequestPing) String() string { return proto.CompactTextString(m) } -func (*RequestPing) ProtoMessage() {} -func (*RequestPing) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{0} -} -func (m *RequestPing) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestPing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestPing.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestPing) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestPing.Merge(m, src) -} -func (m *RequestPing) XXX_Size() int { - return m.Size() -} -func (m *RequestPing) XXX_DiscardUnknown() { - xxx_messageInfo_RequestPing.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestPing proto.InternalMessageInfo - -type RequestBroadcastTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } -func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{1} -} -func (m *RequestBroadcastTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RequestBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_RequestBroadcastTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *RequestBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_RequestBroadcastTx.Merge(m, src) -} -func (m *RequestBroadcastTx) XXX_Size() int { - return m.Size() -} -func (m *RequestBroadcastTx) XXX_DiscardUnknown() { - xxx_messageInfo_RequestBroadcastTx.DiscardUnknown(m) -} - -var xxx_messageInfo_RequestBroadcastTx proto.InternalMessageInfo - -func (m *RequestBroadcastTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type ResponsePing struct { -} - -func (m *ResponsePing) Reset() { *m = ResponsePing{} } -func (m *ResponsePing) String() string { return proto.CompactTextString(m) } -func (*ResponsePing) ProtoMessage() {} -func (*ResponsePing) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{2} -} -func (m *ResponsePing) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*ResponsePing) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponsePing.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponsePing) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponsePing.Merge(m, src) -} -func (m *ResponsePing) XXX_Size() int { - return m.Size() -} -func (m *ResponsePing) XXX_DiscardUnknown() { - xxx_messageInfo_ResponsePing.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponsePing proto.InternalMessageInfo - -type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx,proto3" json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx,proto3" json:"deliver_tx,omitempty"` -} - -func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } -func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { - return fileDescriptor_0ffff5682c662b95, []int{3} -} -func (m *ResponseBroadcastTx) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResponseBroadcastTx) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - if deterministic { - return xxx_messageInfo_ResponseBroadcastTx.Marshal(b, m, deterministic) - } else { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil - } -} -func (m *ResponseBroadcastTx) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResponseBroadcastTx.Merge(m, src) -} -func (m *ResponseBroadcastTx) XXX_Size() int { - return m.Size() -} -func (m *ResponseBroadcastTx) XXX_DiscardUnknown() { - xxx_messageInfo_ResponseBroadcastTx.DiscardUnknown(m) -} - -var xxx_messageInfo_ResponseBroadcastTx proto.InternalMessageInfo - -func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { - if m != nil { - return m.CheckTx - } - return nil -} - -func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { - if m != nil { - return m.DeliverTx - } - return nil -} - -func init() { - proto.RegisterType((*RequestPing)(nil), "tendermint.rpc.grpc.RequestPing") - proto.RegisterType((*RequestBroadcastTx)(nil), "tendermint.rpc.grpc.RequestBroadcastTx") - proto.RegisterType((*ResponsePing)(nil), "tendermint.rpc.grpc.ResponsePing") - proto.RegisterType((*ResponseBroadcastTx)(nil), "tendermint.rpc.grpc.ResponseBroadcastTx") -} - -func init() { proto.RegisterFile("tendermint/rpc/grpc/types.proto", fileDescriptor_0ffff5682c662b95) } - -var fileDescriptor_0ffff5682c662b95 = []byte{ - // 316 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2f, 0x49, 0xcd, 0x4b, - 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x2f, 0x2a, 0x48, 0xd6, 0x4f, 0x07, 0x11, 0x25, 0x95, - 0x05, 0xa9, 0xc5, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xc2, 0x08, 0x05, 0x7a, 0x45, 0x05, - 0xc9, 0x7a, 0x20, 0x05, 0x52, 0xd2, 0x48, 0xba, 0x12, 0x93, 0x92, 0x33, 0x91, 0x75, 0x28, 0xf1, - 0x72, 0x71, 0x07, 0xa5, 0x16, 0x96, 0xa6, 0x16, 0x97, 0x04, 0x64, 0xe6, 0xa5, 0x2b, 0xa9, 0x70, - 0x09, 0x41, 0xb9, 0x4e, 0x45, 0xf9, 0x89, 0x29, 0xc9, 0x89, 0xc5, 0x25, 0x21, 0x15, 0x42, 0x7c, - 0x5c, 0x4c, 0x25, 0x15, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0x3c, 0x41, 0x4c, 0x25, 0x15, 0x4a, 0x7c, - 0x5c, 0x3c, 0x41, 0xa9, 0xc5, 0x05, 0xf9, 
0x79, 0xc5, 0xa9, 0x60, 0x5d, 0x53, 0x19, 0xb9, 0x84, - 0x61, 0x02, 0xc8, 0xfa, 0xac, 0xb9, 0x38, 0x92, 0x33, 0x52, 0x93, 0xb3, 0xe3, 0xa1, 0xba, 0xb9, - 0x8d, 0x14, 0xf4, 0x90, 0x5c, 0x08, 0x72, 0x8c, 0x1e, 0x4c, 0x9f, 0x33, 0x48, 0x61, 0x48, 0x45, - 0x10, 0x7b, 0x32, 0x84, 0x21, 0xe4, 0xc8, 0xc5, 0x95, 0x92, 0x9a, 0x93, 0x59, 0x96, 0x5a, 0x04, - 0xd2, 0xce, 0x04, 0xd6, 0xae, 0x84, 0x53, 0xbb, 0x0b, 0x44, 0x69, 0x48, 0x45, 0x10, 0x67, 0x0a, - 0x8c, 0x69, 0xb4, 0x97, 0x91, 0x8b, 0x07, 0xee, 0x1e, 0xc7, 0x00, 0x4f, 0x21, 0x6f, 0x2e, 0x16, - 0x90, 0x83, 0x85, 0x50, 0x9c, 0x01, 0x0b, 0x28, 0x3d, 0xa4, 0x80, 0x90, 0x52, 0xc4, 0xa1, 0x02, - 0xe1, 0x6b, 0xa1, 0x04, 0x2e, 0x6e, 0x64, 0xcf, 0xaa, 0xe3, 0x33, 0x13, 0x49, 0xa1, 0x94, 0x06, - 0x5e, 0xa3, 0x91, 0x54, 0x3a, 0xf9, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, - 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, - 0x94, 0x51, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0x52, 0xf4, 0x62, - 0x49, 0x1f, 0xd6, 0xc9, 0xf9, 0x45, 0xa9, 0x20, 0x46, 0x12, 0x1b, 0x38, 0xc6, 0x8d, 0x01, 0x01, - 0x00, 0x00, 0xff, 0xff, 0xf6, 0x4b, 0x02, 0xd8, 0x46, 0x02, 0x00, 0x00, -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// BroadcastAPIClient is the client API for BroadcastAPI service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type BroadcastAPIClient interface { - Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) - BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) -} - -type broadcastAPIClient struct { - cc *grpc.ClientConn -} - -func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { - return &broadcastAPIClient{cc} -} - -func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { - out := new(ResponsePing) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/Ping", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { - out := new(ResponseBroadcastTx) - err := c.cc.Invoke(ctx, "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BroadcastAPIServer is the server API for BroadcastAPI service. -type BroadcastAPIServer interface { - Ping(context.Context, *RequestPing) (*ResponsePing, error) - BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) -} - -// UnimplementedBroadcastAPIServer can be embedded to have forward compatible implementations. 
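// The forward-compatibility pattern this stub enables, sketched with a
// hypothetical pingOnlyServer type: a server that only implements Ping
// still satisfies BroadcastAPIServer, because the embedded stub answers
// BroadcastTx with codes.Unimplemented. Register a pointer so the stub's
// pointer-receiver methods are promoted into the method set:
//
//	type pingOnlyServer struct {
//		UnimplementedBroadcastAPIServer // supplies BroadcastTx
//	}
//
//	func (*pingOnlyServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) {
//		return &ResponsePing{}, nil
//	}
//
//	// RegisterBroadcastAPIServer(grpcServer, &pingOnlyServer{})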
-type UnimplementedBroadcastAPIServer struct { -} - -func (*UnimplementedBroadcastAPIServer) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - return nil, status.Errorf(codes.Unimplemented, "method Ping not implemented") -} -func (*UnimplementedBroadcastAPIServer) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - return nil, status.Errorf(codes.Unimplemented, "method BroadcastTx not implemented") -} - -func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { - s.RegisterService(&_BroadcastAPI_serviceDesc, srv) -} - -func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestPing) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) - } - return interceptor(ctx, in, info, handler) -} - -func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBroadcastTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/tendermint.rpc.grpc.BroadcastAPI/BroadcastTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) - } - return interceptor(ctx, in, info, handler) -} - -var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ - ServiceName: "tendermint.rpc.grpc.BroadcastAPI", - HandlerType: (*BroadcastAPIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Ping", - Handler: _BroadcastAPI_Ping_Handler, - }, - { - MethodName: "BroadcastTx", - Handler: _BroadcastAPI_BroadcastTx_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "tendermint/rpc/grpc/types.proto", -} - -func (m *RequestPing) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestPing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestPing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *RequestBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RequestBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RequestBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tx) > 0 { - i -= len(m.Tx) - copy(dAtA[i:], m.Tx) - i = encodeVarintTypes(dAtA, i, uint64(len(m.Tx))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *ResponsePing) Marshal() (dAtA []byte, err error) { - size := 
m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponsePing) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponsePing) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - return len(dAtA) - i, nil -} - -func (m *ResponseBroadcastTx) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResponseBroadcastTx) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResponseBroadcastTx) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DeliverTx != nil { - { - size, err := m.DeliverTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.CheckTx != nil { - { - size, err := m.CheckTx.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintTypes(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { - offset -= sovTypes(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *RequestPing) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *RequestBroadcastTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Tx) - if l > 0 { - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func (m *ResponsePing) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *ResponseBroadcastTx) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.CheckTx != nil { - l = m.CheckTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - if m.DeliverTx != nil { - l = m.DeliverTx.Size() - n += 1 + l + sovTypes(uint64(l)) - } - return n -} - -func sovTypes(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozTypes(x uint64) (n int) { - return sovTypes(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RequestPing) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestPing: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestPing: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RequestBroadcastTx) Unmarshal(dAtA []byte) 
error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RequestBroadcastTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RequestBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tx", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tx = append(m.Tx[:0], dAtA[iNdEx:postIndex]...) - if m.Tx == nil { - m.Tx = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponsePing) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponsePing: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponsePing: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResponseBroadcastTx) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResponseBroadcastTx: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResponseBroadcastTx: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CheckTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CheckTx == nil { - m.CheckTx = &types.ResponseCheckTx{} - } - if err := m.CheckTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTypes - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTypes - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthTypes - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeliverTx == nil { - m.DeliverTx = &types.ResponseDeliverTx{} - } - if err := m.DeliverTx.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTypes(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthTypes - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTypes(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTypes - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthTypes - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupTypes - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthTypes - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthTypes = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupTypes = fmt.Errorf("proto: unexpected end of group") -) diff --git a/rpc/jsonrpc/client/decode.go b/rpc/jsonrpc/client/decode.go index 42941ea68..f69926cb7 100644 --- a/rpc/jsonrpc/client/decode.go +++ b/rpc/jsonrpc/client/decode.go @@ -6,18 +6,18 @@ import ( "fmt" tmjson "github.com/tendermint/tendermint/libs/json" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes 
"github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func unmarshalResponseBytes( responseBytes []byte, - expectedID types.JSONRPCIntID, + expectedID rpctypes.JSONRPCIntID, result interface{}, ) (interface{}, error) { // Read response. If rpc/core/types is imported, the result will unmarshal // into the correct type. - response := &types.RPCResponse{} + response := &rpctypes.RPCResponse{} if err := json.Unmarshal(responseBytes, response); err != nil { return nil, fmt.Errorf("error unmarshaling: %w", err) } @@ -40,12 +40,12 @@ func unmarshalResponseBytes( func unmarshalResponseBytesArray( responseBytes []byte, - expectedIDs []types.JSONRPCIntID, + expectedIDs []rpctypes.JSONRPCIntID, results []interface{}, ) ([]interface{}, error) { var ( - responses []types.RPCResponse + responses []rpctypes.RPCResponse ) if err := json.Unmarshal(responseBytes, &responses); err != nil { @@ -64,10 +64,10 @@ func unmarshalResponseBytesArray( } // Intersect IDs from responses with expectedIDs. - ids := make([]types.JSONRPCIntID, len(responses)) + ids := make([]rpctypes.JSONRPCIntID, len(responses)) var ok bool for i, resp := range responses { - ids[i], ok = resp.ID.(types.JSONRPCIntID) + ids[i], ok = resp.ID.(rpctypes.JSONRPCIntID) if !ok { return nil, fmt.Errorf("expected JSONRPCIntID, got %T", resp.ID) } @@ -85,8 +85,8 @@ func unmarshalResponseBytesArray( return results, nil } -func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { - m := make(map[types.JSONRPCIntID]bool, len(expectedIDs)) +func validateResponseIDs(ids, expectedIDs []rpctypes.JSONRPCIntID) error { + m := make(map[rpctypes.JSONRPCIntID]bool, len(expectedIDs)) for _, expectedID := range expectedIDs { m[expectedID] = true } @@ -104,11 +104,11 @@ func validateResponseIDs(ids, expectedIDs []types.JSONRPCIntID) error { // From the JSON-RPC 2.0 spec: // id: It MUST be the same as the value of the id member in the Request Object. -func validateAndVerifyID(res *types.RPCResponse, expectedID types.JSONRPCIntID) error { +func validateAndVerifyID(res *rpctypes.RPCResponse, expectedID rpctypes.JSONRPCIntID) error { if err := validateResponseID(res.ID); err != nil { return err } - if expectedID != res.ID.(types.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type + if expectedID != res.ID.(rpctypes.JSONRPCIntID) { // validateResponseID ensured res.ID has the right type return fmt.Errorf("response ID (%d) does not match request ID (%d)", res.ID, expectedID) } return nil @@ -118,7 +118,7 @@ func validateResponseID(id interface{}) error { if id == nil { return errors.New("no ID") } - _, ok := id.(types.JSONRPCIntID) + _, ok := id.(rpctypes.JSONRPCIntID) if !ok { return fmt.Errorf("expected JSONRPCIntID, but got: %T", id) } diff --git a/rpc/jsonrpc/client/http_json_client.go b/rpc/jsonrpc/client/http_json_client.go index 71c00137b..7733eb00c 100644 --- a/rpc/jsonrpc/client/http_json_client.go +++ b/rpc/jsonrpc/client/http_json_client.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io/ioutil" "net" @@ -13,7 +14,7 @@ import ( "time" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( @@ -155,7 +156,7 @@ func New(remote string) (*Client, error) { // panics when client is nil. 
func NewWithHTTPClient(remote string, c *http.Client) (*Client, error) { if c == nil { - panic("nil http.Client") + return nil, errors.New("nil client") } parsedURL, err := newParsedURL(remote) @@ -189,7 +190,7 @@ func (c *Client) Call( ) (interface{}, error) { id := c.nextRequestID() - request, err := types.MapToRequest(id, method, params) + request, err := rpctypes.MapToRequest(id, method, params) if err != nil { return nil, fmt.Errorf("failed to encode params: %w", err) } @@ -235,7 +236,7 @@ func (c *Client) NewRequestBatch() *RequestBatch { } func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedRequest) ([]interface{}, error) { - reqs := make([]types.RPCRequest, 0, len(requests)) + reqs := make([]rpctypes.RPCRequest, 0, len(requests)) results := make([]interface{}, 0, len(requests)) for _, req := range requests { reqs = append(reqs, req.request) @@ -272,20 +273,20 @@ func (c *Client) sendBatch(ctx context.Context, requests []*jsonRPCBufferedReque } // collect ids to check responses IDs in unmarshalResponseBytesArray - ids := make([]types.JSONRPCIntID, len(requests)) + ids := make([]rpctypes.JSONRPCIntID, len(requests)) for i, req := range requests { - ids[i] = req.request.ID.(types.JSONRPCIntID) + ids[i] = req.request.ID.(rpctypes.JSONRPCIntID) } return unmarshalResponseBytesArray(responseBytes, ids, results) } -func (c *Client) nextRequestID() types.JSONRPCIntID { +func (c *Client) nextRequestID() rpctypes.JSONRPCIntID { c.mtx.Lock() id := c.nextReqID c.nextReqID++ c.mtx.Unlock() - return types.JSONRPCIntID(id) + return rpctypes.JSONRPCIntID(id) } //------------------------------------------------------------------------------------ @@ -293,7 +294,7 @@ func (c *Client) nextRequestID() types.JSONRPCIntID { // jsonRPCBufferedRequest encapsulates a single buffered request, as well as its // anticipated response structure. type jsonRPCBufferedRequest struct { - request types.RPCRequest + request rpctypes.RPCRequest result interface{} // The result will be deserialized into this object. 
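// How batching is used end to end, sketched (the address is illustrative;
// "echo" and ResultEcho mirror the routes in rpc/jsonrpc/jsonrpc_test.go).
// Call only buffers a request; Send flushes every buffered request in a
// single HTTP round trip:
//
//	c, _ := New("tcp://127.0.0.1:26657")
//	b := c.NewRequestBatch()
//	var r1, r2 ResultEcho
//	_, _ = b.Call(ctx, "echo", map[string]interface{}{"arg": "hi"}, &r1)
//	_, _ = b.Call(ctx, "echo", map[string]interface{}{"arg": "there"}, &r2)
//	results, err := b.Send(ctx) // one round trip for both requests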
} @@ -354,7 +355,7 @@ func (b *RequestBatch) Call( result interface{}, ) (interface{}, error) { id := b.client.nextRequestID() - request, err := types.MapToRequest(id, method, params) + request, err := rpctypes.MapToRequest(id, method, params) if err != nil { return nil, err } diff --git a/rpc/jsonrpc/client/http_uri_client.go b/rpc/jsonrpc/client/http_uri_client.go index 3f376ddb0..cd4ff0686 100644 --- a/rpc/jsonrpc/client/http_uri_client.go +++ b/rpc/jsonrpc/client/http_uri_client.go @@ -7,12 +7,12 @@ import ( "net/http" "strings" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) const ( // URIClientRequestID in a request ID used by URIClient - URIClientRequestID = types.JSONRPCIntID(-1) + URIClientRequestID = rpctypes.JSONRPCIntID(-1) ) // URIClient is a JSON-RPC client, which sends POST form HTTP requests to the diff --git a/rpc/jsonrpc/client/integration_test.go b/rpc/jsonrpc/client/integration_test.go index 228bbb460..26f24d255 100644 --- a/rpc/jsonrpc/client/integration_test.go +++ b/rpc/jsonrpc/client/integration_test.go @@ -1,3 +1,4 @@ +//go:build release // +build release // The code in here is comprehensive as an integration diff --git a/rpc/jsonrpc/client/ws_client.go b/rpc/jsonrpc/client/ws_client.go index f47186429..8d8f9d18d 100644 --- a/rpc/jsonrpc/client/ws_client.go +++ b/rpc/jsonrpc/client/ws_client.go @@ -14,8 +14,8 @@ import ( metrics "github.com/rcrowley/go-metrics" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/libs/service" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + tmclient "github.com/tendermint/tendermint/rpc/client" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // WSOptions for WSClient. @@ -41,6 +41,7 @@ func DefaultWSOptions() WSOptions { // // WSClient is safe for concurrent use by multiple goroutines. type WSClient struct { // nolint: maligned + *tmclient.RunState conn *websocket.Conn Address string // IP:PORT or /path/to/socket @@ -49,16 +50,16 @@ type WSClient struct { // nolint: maligned // Single user facing channel to read RPCResponses from, closed only when the // client is being stopped. - ResponsesCh chan types.RPCResponse + ResponsesCh chan rpctypes.RPCResponse // Callback, which will be called each time after successful reconnect. onReconnect func() // internal channels - send chan types.RPCRequest // user requests - backlog chan types.RPCRequest // stores a single user request received during a conn failure - reconnectAfter chan error // reconnect requests - readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine + send chan rpctypes.RPCRequest // user requests + backlog chan rpctypes.RPCRequest // stores a single user request received during a conn failure + reconnectAfter chan error // reconnect requests + readRoutineQuit chan struct{} // a way for readRoutine to close writeRoutine // Maximum reconnect attempts (0 or greater; default: 25). maxReconnectAttempts uint @@ -83,8 +84,6 @@ type WSClient struct { // nolint: maligned // Send pings to server with this period. Must be less than readWait. If 0, no pings will be sent. pingPeriod time.Duration - service.BaseService - // Time between sending a ping and receiving a pong. See // https://godoc.org/github.com/rcrowley/go-metrics#Timer. 
PingPongLatencyTimer metrics.Timer @@ -114,6 +113,7 @@ func NewWSWithOptions(remoteAddr, endpoint string, opts WSOptions) (*WSClient, e } c := &WSClient{ + RunState: tmclient.NewRunState("WSClient", nil), Address: parsedURL.GetTrimmedHostWithPath(), Dialer: dialFn, Endpoint: endpoint, @@ -127,7 +127,6 @@ func NewWSWithOptions(remoteAddr, endpoint string, opts WSOptions) (*WSClient, e // sentIDs: make(map[types.JSONRPCIntID]bool), } - c.BaseService = *service.NewBaseService(nil, "WSClient", c) return c, nil } @@ -143,23 +142,25 @@ func (c *WSClient) String() string { return fmt.Sprintf("WSClient{%s (%s)}", c.Address, c.Endpoint) } -// OnStart implements service.Service by dialing a server and creating read and -// write routines. -func (c *WSClient) OnStart() error { +// Start dials the specified service address and starts the I/O routines. +func (c *WSClient) Start() error { + if err := c.RunState.Start(); err != nil { + return err + } err := c.dial() if err != nil { return err } - c.ResponsesCh = make(chan types.RPCResponse) + c.ResponsesCh = make(chan rpctypes.RPCResponse) - c.send = make(chan types.RPCRequest) + c.send = make(chan rpctypes.RPCRequest) // 1 additional error may come from the read/write // goroutine depending on which failed first. c.reconnectAfter = make(chan error, 1) // capacity for 1 request. a user won't be able to send more because the send // channel is unbuffered. - c.backlog = make(chan types.RPCRequest, 1) + c.backlog = make(chan rpctypes.RPCRequest, 1) c.startReadWriteRoutines() go c.reconnectRoutine() @@ -167,10 +168,9 @@ func (c *WSClient) OnStart() error { return nil } -// Stop overrides service.Service#Stop. There is no other way to wait until Quit -// channel is closed. +// Stop shuts down the client. func (c *WSClient) Stop() error { - if err := c.BaseService.Stop(); err != nil { + if err := c.RunState.Stop(); err != nil { return err } // only close user-facing channels when we can't write to them @@ -195,7 +195,7 @@ func (c *WSClient) IsActive() bool { // Send the given RPC request to the server. Results will be available on // ResponsesCh, errors, if any, on ErrorsCh. Will block until send succeeds or // ctx.Done is closed. -func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { +func (c *WSClient) Send(ctx context.Context, request rpctypes.RPCRequest) error { select { case c.send <- request: c.Logger.Info("sent a request", "req", request) @@ -210,7 +210,7 @@ func (c *WSClient) Send(ctx context.Context, request types.RPCRequest) error { // Call enqueues a call request onto the Send queue. Requests are JSON encoded. func (c *WSClient) Call(ctx context.Context, method string, params map[string]interface{}) error { - request, err := types.MapToRequest(c.nextRequestID(), method, params) + request, err := rpctypes.MapToRequest(c.nextRequestID(), method, params) if err != nil { return err } @@ -220,7 +220,7 @@ func (c *WSClient) Call(ctx context.Context, method string, params map[string]in // CallWithArrayParams enqueues a call request onto the Send queue. Params are // in a form of array (e.g. []interface{}{"abcd"}). Requests are JSON encoded. 
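// Typical client lifecycle, sketched (the address is illustrative; "echo"
// mirrors the routes in rpc/jsonrpc/jsonrpc_test.go). Responses arrive
// asynchronously on ResponsesCh, so correlate request IDs when several
// calls are in flight:
//
//	c, err := NewWSWithOptions("tcp://127.0.0.1:26657", "/websocket", DefaultWSOptions())
//	if err != nil {
//		return err
//	}
//	if err := c.Start(); err != nil {
//		return err
//	}
//	defer c.Stop() //nolint:errcheck
//	if err := c.Call(ctx, "echo", map[string]interface{}{"arg": "hello"}); err != nil {
//		return err
//	}
//	resp := <-c.ResponsesCh // an rpctypes.RPCResponse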
func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, params []interface{}) error { - request, err := types.ArrayToRequest(c.nextRequestID(), method, params) + request, err := rpctypes.ArrayToRequest(c.nextRequestID(), method, params) if err != nil { return err } @@ -229,12 +229,12 @@ func (c *WSClient) CallWithArrayParams(ctx context.Context, method string, param // Private methods -func (c *WSClient) nextRequestID() types.JSONRPCIntID { +func (c *WSClient) nextRequestID() rpctypes.JSONRPCIntID { c.mtx.Lock() id := c.nextReqID c.nextReqID++ c.mtx.Unlock() - return types.JSONRPCIntID(id) + return rpctypes.JSONRPCIntID(id) } func (c *WSClient) dial() error { @@ -462,7 +462,7 @@ func (c *WSClient) readRoutine() { return } - var response types.RPCResponse + var response rpctypes.RPCResponse err = json.Unmarshal(data, &response) if err != nil { c.Logger.Error("failed to parse response", "err", err, "data", string(data)) diff --git a/rpc/jsonrpc/client/ws_client_test.go b/rpc/jsonrpc/client/ws_client_test.go index bb8c149f6..208313e79 100644 --- a/rpc/jsonrpc/client/ws_client_test.go +++ b/rpc/jsonrpc/client/ws_client_test.go @@ -14,7 +14,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) var wsCallTimeout = 5 * time.Second @@ -41,7 +41,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { return } - var req types.RPCRequest + var req rpctypes.RPCRequest err = json.Unmarshal(in, &req) if err != nil { panic(err) @@ -56,7 +56,7 @@ func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { h.mtx.RUnlock() res := json.RawMessage(`{}`) - emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: res, ID: req.ID}) + emptyRespBytes, _ := json.Marshal(rpctypes.RPCResponse{Result: res, ID: req.ID}) if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil { return } diff --git a/rpc/jsonrpc/jsonrpc_test.go b/rpc/jsonrpc/jsonrpc_test.go index 6e0c03f00..5013590b6 100644 --- a/rpc/jsonrpc/jsonrpc_test.go +++ b/rpc/jsonrpc/jsonrpc_test.go @@ -18,9 +18,9 @@ import ( tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - client "github.com/tendermint/tendermint/rpc/jsonrpc/client" - server "github.com/tendermint/tendermint/rpc/jsonrpc/server" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/jsonrpc/client" + "github.com/tendermint/tendermint/rpc/jsonrpc/server" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Client and Server should work over tcp or unix sockets @@ -64,23 +64,23 @@ var Routes = map[string]*server.RPCFunc{ "echo_int": server.NewRPCFunc(EchoIntResult, "arg", false), } -func EchoResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoWSResult(ctx *types.Context, v string) (*ResultEcho, error) { +func EchoWSResult(ctx *rpctypes.Context, v string) (*ResultEcho, error) { return &ResultEcho{v}, nil } -func EchoIntResult(ctx *types.Context, v int) (*ResultEchoInt, error) { +func EchoIntResult(ctx *rpctypes.Context, v int) (*ResultEchoInt, error) { return &ResultEchoInt{v}, nil } -func EchoBytesResult(ctx *types.Context, v []byte) (*ResultEchoBytes, error) { +func EchoBytesResult(ctx *rpctypes.Context, v []byte) 
(*ResultEchoBytes, error) { return &ResultEchoBytes{v}, nil } -func EchoDataBytesResult(ctx *types.Context, v tmbytes.HexBytes) (*ResultEchoDataBytes, error) { +func EchoDataBytesResult(ctx *rpctypes.Context, v tmbytes.HexBytes) (*ResultEchoDataBytes, error) { return &ResultEchoDataBytes{v}, nil } @@ -110,7 +110,7 @@ func setup() { wm.SetLogger(tcpLogger) mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) config := server.DefaultConfig() - listener1, err := server.Listen(tcpAddr, config) + listener1, err := server.Listen(tcpAddr, config.MaxOpenConnections) if err != nil { panic(err) } @@ -126,7 +126,7 @@ func setup() { wm = server.NewWebsocketManager(Routes) wm.SetLogger(unixLogger) mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - listener2, err := server.Listen(unixAddr, config) + listener2, err := server.Listen(unixAddr, config.MaxOpenConnections) if err != nil { panic(err) } diff --git a/rpc/jsonrpc/server/http_json_handler.go b/rpc/jsonrpc/server/http_json_handler.go index bbb32b407..fbc0cca79 100644 --- a/rpc/jsonrpc/server/http_json_handler.go +++ b/rpc/jsonrpc/server/http_json_handler.go @@ -12,8 +12,8 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // HTTP + JSON handler @@ -23,7 +23,7 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han return func(w http.ResponseWriter, r *http.Request) { b, err := ioutil.ReadAll(r.Body) if err != nil { - res := types.RPCInvalidRequestError(nil, + res := rpctypes.RPCInvalidRequestError(nil, fmt.Errorf("error reading request body: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { @@ -41,20 +41,20 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han // first try to unmarshal the incoming request as an array of RPC requests var ( - requests []types.RPCRequest - responses []types.RPCResponse + requests []rpctypes.RPCRequest + responses []rpctypes.RPCResponse ) if err := json.Unmarshal(b, &requests); err != nil { // next, try to unmarshal as a single request - var request types.RPCRequest + var request rpctypes.RPCRequest if err := json.Unmarshal(b, &request); err != nil { - res := types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) + res := rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err)) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } return } - requests = []types.RPCRequest{request} + requests = []rpctypes.RPCRequest{request} } // Set the default response cache to true unless @@ -77,25 +77,25 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han if len(r.URL.Path) > 1 { responses = append( responses, - types.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), + rpctypes.RPCInvalidRequestError(request.ID, fmt.Errorf("path %s is invalid", r.URL.Path)), ) c = false continue } rpcFunc, ok := funcMap[request.Method] if !ok || rpcFunc.ws { - responses = append(responses, types.RPCMethodNotFoundError(request.ID)) + responses = append(responses, rpctypes.RPCMethodNotFoundError(request.ID)) c = false continue } - ctx := &types.Context{JSONReq: &request, HTTPReq: r} + ctx := 
&rpctypes.Context{JSONReq: &request, HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) if err != nil { responses = append( responses, - types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), ) c = false continue @@ -114,22 +114,22 @@ func makeJSONRPCHandler(funcMap map[string]*RPCFunc, logger log.Logger) http.Han switch e := err.(type) { // if no error then return a success response case nil: - responses = append(responses, types.NewRPCSuccessResponse(request.ID, result)) + responses = append(responses, rpctypes.NewRPCSuccessResponse(request.ID, result)) // if this already of type RPC error then forward that error - case *types.RPCError: - responses = append(responses, types.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data)) + case *rpctypes.RPCError: + responses = append(responses, rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data)) c = false default: // we need to unwrap the error and parse it accordingly switch errors.Unwrap(err) { // check if the error was due to an invald request - case ctypes.ErrZeroOrNegativeHeight, ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, ctypes.ErrInvalidRequest: - responses = append(responses, types.RPCInvalidRequestError(request.ID, err)) + case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: + responses = append(responses, rpctypes.RPCInvalidRequestError(request.ID, err)) c = false // lastly default all remaining errors as internal errors default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - responses = append(responses, types.RPCInternalError(request.ID, err)) + responses = append(responses, rpctypes.RPCInternalError(request.ID, err)) c = false } } @@ -277,7 +277,7 @@ func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[st w.Write(buf.Bytes()) // nolint: errcheck } -func hasDefaultHeight(r types.RPCRequest, h []reflect.Value) bool { +func hasDefaultHeight(r rpctypes.RPCRequest, h []reflect.Value) bool { switch r.Method { case "block", "block_results", "commit", "consensus_params", "validators": return len(h) < 2 || h[1].IsZero() diff --git a/rpc/jsonrpc/server/http_json_handler_test.go b/rpc/jsonrpc/server/http_json_handler_test.go index 529f7619c..64e7597fd 100644 --- a/rpc/jsonrpc/server/http_json_handler_test.go +++ b/rpc/jsonrpc/server/http_json_handler_test.go @@ -12,13 +12,13 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func testMux() *http.ServeMux { funcMap := map[string]*RPCFunc{ - "c": NewRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i", false), - "block": NewRPCFunc(func(ctx *types.Context, h int) (string, error) { return "block", nil }, "height", true), + "c": NewRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i", false), + "block": NewRPCFunc(func(ctx *rpctypes.Context, h int) (string, error) { return "block", nil }, "height", true), } mux := http.NewServeMux() logger := log.NewNopLogger() @@ -40,21 +40,21 @@ func TestRPCParams(t *testing.T) { 
expectedID interface{} }{ // bad - {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", types.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found", rpctypes.JSONRPCStringID("0")}, // id not captured in JSON parsing failures {`{"method": "c", "id": "0", "params": a}`, "invalid character", nil}, - {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", types.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "invalid character", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string", rpctypes.JSONRPCStringID("0")}, // no ID - notification // {`{"jsonrpc": "2.0", "method": "c", "params": ["a", "10"]}`, false, nil}, // good - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": {}}`, "", types.JSONRPCStringID("0")}, - {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", types.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, "", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": {}}`, "", rpctypes.JSONRPCStringID("0")}, + {`{"method": "c", "id": "0", "params": ["a", "10"]}`, "", rpctypes.JSONRPCStringID("0")}, } for i, tt := range tests { @@ -71,9 +71,9 @@ func TestRPCParams(t *testing.T) { continue } - recv := new(types.RPCResponse) + recv := new(rpctypes.RPCResponse) assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) if tt.wantErr == "" { assert.Nil(t, recv.Error, "#%d: not expecting an error", i) @@ -93,12 +93,12 @@ func TestJSONRPCID(t *testing.T) { expectedID interface{} }{ // good id - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, types.JSONRPCStringID("0")}, - {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, types.JSONRPCStringID("abc")}, - {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, types.JSONRPCIntID(0)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, types.JSONRPCIntID(1)}, - {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, types.JSONRPCIntID(-1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("0")}, + {`{"jsonrpc": "2.0", "method": "c", "id": "abc", "params": ["a", "10"]}`, false, rpctypes.JSONRPCStringID("abc")}, + {`{"jsonrpc": "2.0", "method": "c", "id": 0, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(0)}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1, "params": ["a", 
"10"]}`, false, rpctypes.JSONRPCIntID(1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": 1.3, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(1)}, + {`{"jsonrpc": "2.0", "method": "c", "id": -1, "params": ["a", "10"]}`, false, rpctypes.JSONRPCIntID(-1)}, // bad id {`{"jsonrpc": "2.0", "method": "c", "id": {}, "params": ["a", "10"]}`, true, nil}, @@ -119,11 +119,11 @@ func TestJSONRPCID(t *testing.T) { } res.Body.Close() - recv := new(types.RPCResponse) + recv := new(rpctypes.RPCResponse) err = json.Unmarshal(blob, recv) assert.Nil(t, err, "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) if !tt.wantErr { - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, recv, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) assert.Equal(t, tt.expectedID, recv.ID, "#%d: expected ID not matched in RPCResponse", i) assert.Nil(t, recv.Error, "#%d: not expecting an error", i) } else { @@ -185,7 +185,7 @@ func TestRPCNotificationInBatch(t *testing.T) { } res.Body.Close() - var responses []types.RPCResponse + var responses []rpctypes.RPCResponse // try to unmarshal an array first err = json.Unmarshal(blob, &responses) if err != nil { @@ -195,14 +195,14 @@ func TestRPCNotificationInBatch(t *testing.T) { continue } else { // we were expecting an error here, so let's unmarshal a single response - var response types.RPCResponse + var response rpctypes.RPCResponse err = json.Unmarshal(blob, &response) if err != nil { t.Errorf("#%d: expected successful parsing of an RPCResponse\nblob: %s", i, blob) continue } // have a single-element result - responses = []types.RPCResponse{response} + responses = []rpctypes.RPCResponse{response} } } if tt.expectCount != len(responses) { @@ -210,7 +210,7 @@ func TestRPCNotificationInBatch(t *testing.T) { continue } for _, response := range responses { - assert.NotEqual(t, response, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) + assert.NotEqual(t, response, new(rpctypes.RPCResponse), "#%d: not expecting a blank RPCResponse", i) } } } diff --git a/rpc/jsonrpc/server/http_server.go b/rpc/jsonrpc/server/http_server.go index c21c71c49..49e1e510e 100644 --- a/rpc/jsonrpc/server/http_server.go +++ b/rpc/jsonrpc/server/http_server.go @@ -16,7 +16,7 @@ import ( "golang.org/x/net/netutil" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // Config is a RPC server configuration. @@ -105,7 +105,7 @@ func ServeTLS( // source: https://www.jsonrpc.org/historical/json-rpc-over-http.html func WriteRPCResponseHTTPError( w http.ResponseWriter, - res types.RPCResponse, + res rpctypes.RPCResponse, ) error { if res.Error == nil { panic("tried to write http error response without RPC error") @@ -134,7 +134,7 @@ func WriteRPCResponseHTTPError( // WriteRPCResponseHTTP marshals res as JSON (with indent) and writes it to w. // If the rpc response can be cached, add cache-control to the response header. 
-func WriteRPCResponseHTTP(w http.ResponseWriter, c bool, res ...types.RPCResponse) error { +func WriteRPCResponseHTTP(w http.ResponseWriter, c bool, res ...rpctypes.RPCResponse) error { var v interface{} if len(res) == 1 { v = res[0] @@ -189,7 +189,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler if e := recover(); e != nil { // If RPCResponse - if res, ok := e.(types.RPCResponse); ok { + if res, ok := e.(rpctypes.RPCResponse); ok { if wErr := WriteRPCResponseHTTP(rww, false, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } @@ -208,7 +208,7 @@ func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler logger.Error("panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack())) - res := types.RPCInternalError(types.JSONRPCIntID(-1), err) + res := rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err) if wErr := WriteRPCResponseHTTPError(rww, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } @@ -261,7 +261,7 @@ func (h maxBytesHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Listen starts a new net.Listener on the given address. // It returns an error if the address is invalid or the call to Listen() fails. -func Listen(addr string, config *Config) (listener net.Listener, err error) { +func Listen(addr string, maxOpenConnections int) (listener net.Listener, err error) { parts := strings.SplitN(addr, "://", 2) if len(parts) != 2 { return nil, fmt.Errorf( @@ -274,8 +274,8 @@ func Listen(addr string, config *Config) (listener net.Listener, err error) { if err != nil { return nil, fmt.Errorf("failed to listen on %v: %v", addr, err) } - if config.MaxOpenConnections > 0 { - listener = netutil.LimitListener(listener, config.MaxOpenConnections) + if maxOpenConnections > 0 { + listener = netutil.LimitListener(listener, maxOpenConnections) } return listener, nil diff --git a/rpc/jsonrpc/server/http_server_test.go b/rpc/jsonrpc/server/http_server_test.go index e7c517cde..39e713565 100644 --- a/rpc/jsonrpc/server/http_server_test.go +++ b/rpc/jsonrpc/server/http_server_test.go @@ -17,7 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) type sampleResult struct { @@ -39,8 +39,7 @@ func TestMaxOpenConnections(t *testing.T) { fmt.Fprint(w, "some body") }) config := DefaultConfig() - config.MaxOpenConnections = max - l, err := Listen("tcp://127.0.0.1:0", config) + l, err := Listen("tcp://127.0.0.1:0", max) require.NoError(t, err) defer l.Close() go Serve(l, mux, log.TestingLogger(), config) //nolint:errcheck // ignore for tests @@ -108,11 +107,11 @@ func TestServeTLS(t *testing.T) { } func TestWriteRPCResponseHTTP(t *testing.T) { - id := types.JSONRPCIntID(-1) + id := rpctypes.JSONRPCIntID(-1) // one argument w := httptest.NewRecorder() - err := WriteRPCResponseHTTP(w, true, types.NewRPCSuccessResponse(id, &sampleResult{"hello"})) + err := WriteRPCResponseHTTP(w, true, rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"})) require.NoError(t, err) resp := w.Result() body, err := ioutil.ReadAll(resp.Body) @@ -133,8 +132,8 @@ func TestWriteRPCResponseHTTP(t *testing.T) { w = httptest.NewRecorder() err = WriteRPCResponseHTTP(w, false, - types.NewRPCSuccessResponse(id, &sampleResult{"hello"}), - types.NewRPCSuccessResponse(id, &sampleResult{"world"})) + 
rpctypes.NewRPCSuccessResponse(id, &sampleResult{"hello"}), + rpctypes.NewRPCSuccessResponse(id, &sampleResult{"world"})) require.NoError(t, err) resp = w.Result() body, err = ioutil.ReadAll(resp.Body) @@ -163,7 +162,7 @@ func TestWriteRPCResponseHTTP(t *testing.T) { func TestWriteRPCResponseHTTPError(t *testing.T) { w := httptest.NewRecorder() - err := WriteRPCResponseHTTPError(w, types.RPCInternalError(types.JSONRPCIntID(-1), errors.New("foo"))) + err := WriteRPCResponseHTTPError(w, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), errors.New("foo"))) require.NoError(t, err) resp := w.Result() body, err := ioutil.ReadAll(resp.Body) diff --git a/rpc/jsonrpc/server/http_uri_handler.go b/rpc/jsonrpc/server/http_uri_handler.go index f5a2ecebc..07b3616b4 100644 --- a/rpc/jsonrpc/server/http_uri_handler.go +++ b/rpc/jsonrpc/server/http_uri_handler.go @@ -11,8 +11,8 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // HTTP + URI handler @@ -22,12 +22,12 @@ var reInt = regexp.MustCompile(`^-?[0-9]+$`) // convert from a function name to the http handler func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWriter, *http.Request) { // Always return -1 as there's no ID here. - dummyID := types.JSONRPCIntID(-1) // URIClientRequestID + dummyID := rpctypes.JSONRPCIntID(-1) // URIClientRequestID // Exception for websocket endpoints if rpcFunc.ws { return func(w http.ResponseWriter, r *http.Request) { - res := types.RPCMethodNotFoundError(dummyID) + res := rpctypes.RPCMethodNotFoundError(dummyID) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } @@ -38,12 +38,12 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit return func(w http.ResponseWriter, r *http.Request) { logger.Debug("HTTP HANDLER", "req", r) - ctx := &types.Context{HTTPReq: r} + ctx := &rpctypes.Context{HTTPReq: r} args := []reflect.Value{reflect.ValueOf(ctx)} fnArgs, err := httpParamsToArgs(rpcFunc, r) if err != nil { - res := types.RPCInvalidParamsError(dummyID, + res := rpctypes.RPCInvalidParamsError(dummyID, fmt.Errorf("error converting http params to arguments: %w", err), ) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { @@ -60,29 +60,29 @@ func makeHTTPHandler(rpcFunc *RPCFunc, logger log.Logger) func(http.ResponseWrit switch e := err.(type) { // if no error then return a success response case nil: - res := types.NewRPCSuccessResponse(dummyID, result) + res := rpctypes.NewRPCSuccessResponse(dummyID, result) if wErr := WriteRPCResponseHTTP(w, rpcFunc.cache, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } // if this already of type RPC error then forward that error. 
- case *types.RPCError: - res := types.NewRPCErrorResponse(dummyID, e.Code, e.Message, e.Data) + case *rpctypes.RPCError: + res := rpctypes.NewRPCErrorResponse(dummyID, e.Code, e.Message, e.Data) if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { logger.Error("failed to write response", "res", res, "err", wErr) } default: // we need to unwrap the error and parse it accordingly - var res types.RPCResponse + var res rpctypes.RPCResponse switch errors.Unwrap(err) { - case ctypes.ErrZeroOrNegativeHeight, - ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, - ctypes.ErrInvalidRequest: - res = types.RPCInvalidRequestError(dummyID, err) + case coretypes.ErrZeroOrNegativeHeight, + coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, + coretypes.ErrInvalidRequest: + res = rpctypes.RPCInvalidRequestError(dummyID, err) default: // ctypes.ErrHeightNotAvailable, ctypes.ErrHeightExceedsChainHead: - res = types.RPCInternalError(dummyID, err) + res = rpctypes.RPCInternalError(dummyID, err) } if wErr := WriteRPCResponseHTTPError(w, res); wErr != nil { diff --git a/rpc/jsonrpc/server/parse_test.go b/rpc/jsonrpc/server/parse_test.go index 86316f8e5..92ea6f2c0 100644 --- a/rpc/jsonrpc/server/parse_test.go +++ b/rpc/jsonrpc/server/parse_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/libs/bytes" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestParseJSONMap(t *testing.T) { @@ -134,7 +134,7 @@ func TestParseJSONArray(t *testing.T) { } func TestParseJSONRPC(t *testing.T) { - demo := func(ctx *types.Context, height int, name string) {} + demo := func(ctx *rpctypes.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name", false) cases := []struct { @@ -171,7 +171,7 @@ func TestParseJSONRPC(t *testing.T) { } func TestParseURI(t *testing.T) { - demo := func(ctx *types.Context, height int, name string) {} + demo := func(ctx *rpctypes.Context, height int, name string) {} call := NewRPCFunc(demo, "height,name", false) cases := []struct { diff --git a/rpc/jsonrpc/server/ws_handler.go b/rpc/jsonrpc/server/ws_handler.go index a7b77dbd3..2271d03f8 100644 --- a/rpc/jsonrpc/server/ws_handler.go +++ b/rpc/jsonrpc/server/ws_handler.go @@ -13,9 +13,9 @@ import ( "github.com/gorilla/websocket" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + "github.com/tendermint/tendermint/rpc/client" + "github.com/tendermint/tendermint/rpc/coretypes" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) // WebSocket handler @@ -86,8 +86,8 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ }() // register connection - con := newWSConnection(wsConn, wm.funcMap, wm.wsConnOptions...) - con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) + logger := wm.logger.With("remote", wsConn.RemoteAddr()) + con := newWSConnection(wsConn, wm.funcMap, logger, wm.wsConnOptions...) wm.logger.Info("New websocket connection", "remote", con.remoteAddr) err = con.Start() // BLOCKING if err != nil { @@ -106,12 +106,12 @@ func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Requ // // In case of an error, the connection is stopped. 
type wsConnection struct { - service.BaseService + *client.RunState remoteAddr string baseConn *websocket.Conn // writeChan is never closed, to allow WriteRPCResponse() to fail. - writeChan chan types.RPCResponse + writeChan chan rpctypes.RPCResponse // chan, which is closed when/if readRoutine errors // used to abort writeRoutine @@ -150,9 +150,11 @@ type wsConnection struct { func newWSConnection( baseConn *websocket.Conn, funcMap map[string]*RPCFunc, + logger log.Logger, options ...func(*wsConnection), ) *wsConnection { wsc := &wsConnection{ + RunState: client.NewRunState("wsConnection", logger), remoteAddr: baseConn.RemoteAddr().String(), baseConn: baseConn, funcMap: funcMap, @@ -166,7 +168,6 @@ func newWSConnection( option(wsc) } wsc.baseConn.SetReadLimit(wsc.readLimit) - wsc.BaseService = *service.NewBaseService(nil, "wsConnection", wsc) return wsc } @@ -218,10 +219,12 @@ func ReadLimit(readLimit int64) func(*wsConnection) { } } -// OnStart implements service.Service by starting the read and write routines. It -// blocks until there's some error. -func (wsc *wsConnection) OnStart() error { - wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) +// Start starts the client service routines and blocks until there is an error. +func (wsc *wsConnection) Start() error { + if err := wsc.RunState.Start(); err != nil { + return err + } + wsc.writeChan = make(chan rpctypes.RPCResponse, wsc.writeChanCapacity) // Read subscriptions/unsubscriptions to events go wsc.readRoutine() @@ -231,16 +234,18 @@ func (wsc *wsConnection) OnStart() error { return nil } -// OnStop implements service.Service by unsubscribing remoteAddr from all -// subscriptions. -func (wsc *wsConnection) OnStop() { +// Stop unsubscribes the remote from all subscriptions. +func (wsc *wsConnection) Stop() error { + if err := wsc.RunState.Stop(); err != nil { + return err + } if wsc.onDisconnect != nil { wsc.onDisconnect(wsc.remoteAddr) } - if wsc.ctx != nil { wsc.cancel() } + return nil } // GetRemoteAddr returns the remote address of the underlying connection. @@ -252,7 +257,7 @@ func (wsc *wsConnection) GetRemoteAddr() string { // WriteRPCResponse pushes a response to the writeChan, and blocks until it is // accepted. // It implements WSRPCConnection. It is Goroutine-safe. -func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCResponse) error { +func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp rpctypes.RPCResponse) error { select { case <-wsc.Quit(): return errors.New("connection was stopped") @@ -266,7 +271,7 @@ func (wsc *wsConnection) WriteRPCResponse(ctx context.Context, resp types.RPCRes // TryWriteRPCResponse attempts to push a response to the writeChan, but does // not block. // It implements WSRPCConnection. 
It is Goroutine-safe -func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { +func (wsc *wsConnection) TryWriteRPCResponse(resp rpctypes.RPCResponse) bool { select { case <-wsc.Quit(): return false @@ -299,7 +304,7 @@ func (wsc *wsConnection) readRoutine() { err = fmt.Errorf("WSJSONRPC: %v", r) } wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - if err := wsc.WriteRPCResponse(writeCtx, types.RPCInternalError(types.JSONRPCIntID(-1), err)); err != nil { + if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCInternalError(rpctypes.JSONRPCIntID(-1), err)); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } go wsc.readRoutine() @@ -335,11 +340,11 @@ func (wsc *wsConnection) readRoutine() { } dec := json.NewDecoder(r) - var request types.RPCRequest + var request rpctypes.RPCRequest err = dec.Decode(&request) if err != nil { if err := wsc.WriteRPCResponse(writeCtx, - types.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { + rpctypes.RPCParseError(fmt.Errorf("error unmarshaling request: %w", err))); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } continue @@ -358,19 +363,19 @@ func (wsc *wsConnection) readRoutine() { // Now, fetch the RPCFunc and execute it. rpcFunc := wsc.funcMap[request.Method] if rpcFunc == nil { - if err := wsc.WriteRPCResponse(writeCtx, types.RPCMethodNotFoundError(request.ID)); err != nil { + if err := wsc.WriteRPCResponse(writeCtx, rpctypes.RPCMethodNotFoundError(request.ID)); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } continue } - ctx := &types.Context{JSONReq: &request, WSConn: wsc} + ctx := &rpctypes.Context{JSONReq: &request, WSConn: wsc} args := []reflect.Value{reflect.ValueOf(ctx)} if len(request.Params) > 0 { fnArgs, err := jsonParamsToArgs(rpcFunc, request.Params) if err != nil { if err := wsc.WriteRPCResponse(writeCtx, - types.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), + rpctypes.RPCInvalidParamsError(request.ID, fmt.Errorf("error converting json params to arguments: %w", err)), ); err != nil { wsc.Logger.Error("Error writing RPC response", "err", err) } @@ -384,27 +389,27 @@ func (wsc *wsConnection) readRoutine() { // TODO: Need to encode args/returns to string if we want to log them wsc.Logger.Info("WSJSONRPC", "method", request.Method) - var resp types.RPCResponse + var resp rpctypes.RPCResponse result, err := unreflectResult(returns) switch e := err.(type) { // if no error then return a success response case nil: - resp = types.NewRPCSuccessResponse(request.ID, result) + resp = rpctypes.NewRPCSuccessResponse(request.ID, result) // if this already of type RPC error then forward that error - case *types.RPCError: - resp = types.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data) + case *rpctypes.RPCError: + resp = rpctypes.NewRPCErrorResponse(request.ID, e.Code, e.Message, e.Data) default: // we need to unwrap the error and parse it accordingly switch errors.Unwrap(err) { // check if the error was due to an invald request - case ctypes.ErrZeroOrNegativeHeight, ctypes.ErrZeroOrNegativePerPage, - ctypes.ErrPageOutOfRange, ctypes.ErrInvalidRequest: - resp = types.RPCInvalidRequestError(request.ID, err) + case coretypes.ErrZeroOrNegativeHeight, coretypes.ErrZeroOrNegativePerPage, + coretypes.ErrPageOutOfRange, coretypes.ErrInvalidRequest: + resp = rpctypes.RPCInvalidRequestError(request.ID, err) // lastly default all remaining 
errors as internal errors default: // includes ctypes.ErrHeightNotAvailable and ctypes.ErrHeightExceedsChainHead - resp = types.RPCInternalError(request.ID, err) + resp = rpctypes.RPCInternalError(request.ID, err) } } diff --git a/rpc/jsonrpc/server/ws_handler_test.go b/rpc/jsonrpc/server/ws_handler_test.go index 42a96d1d3..b691172a4 100644 --- a/rpc/jsonrpc/server/ws_handler_test.go +++ b/rpc/jsonrpc/server/ws_handler_test.go @@ -9,7 +9,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - types "github.com/tendermint/tendermint/rpc/jsonrpc/types" + rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) func TestWebsocketManagerHandler(t *testing.T) { @@ -26,8 +26,8 @@ func TestWebsocketManagerHandler(t *testing.T) { } // check basic functionality works - req, err := types.MapToRequest( - types.JSONRPCStringID("TestWebsocketManager"), + req, err := rpctypes.MapToRequest( + rpctypes.JSONRPCStringID("TestWebsocketManager"), "c", map[string]interface{}{"s": "a", "i": 10}, ) @@ -35,7 +35,7 @@ err = c.WriteJSON(req) require.NoError(t, err) - var resp types.RPCResponse + var resp rpctypes.RPCResponse err = c.ReadJSON(&resp) require.NoError(t, err) require.Nil(t, resp.Error) @@ -44,7 +44,7 @@ func newWSServer() *httptest.Server { funcMap := map[string]*RPCFunc{ - "c": NewWSRPCFunc(func(ctx *types.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), + "c": NewWSRPCFunc(func(ctx *rpctypes.Context, s string, i int) (string, error) { return "foo", nil }, "s,i"), } wm := NewWebsocketManager(funcMap) wm.SetLogger(log.TestingLogger()) diff --git a/rpc/jsonrpc/test/main.go b/rpc/jsonrpc/test/main.go index 1c949571f..d348e1639 100644 --- a/rpc/jsonrpc/test/main.go +++ b/rpc/jsonrpc/test/main.go @@ -33,7 +33,7 @@ func main() { rpcserver.RegisterRPCFuncs(mux, routes, logger) config := rpcserver.DefaultConfig() - listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config) + listener, err := rpcserver.Listen("tcp://127.0.0.1:8008", config.MaxOpenConnections) if err != nil { tmos.Exit(err.Error()) } diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index bb35d34ac..83d85be8f 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -237,6 +237,31 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + + /remove_tx: + get: + summary: Removes a transaction from the mempool. + tags: + - TxKey + operationId: remove_tx + parameters: + - in: query + name: txKey + required: true + schema: + type: string + example: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" + description: The transaction key + responses: + "200": + description: empty response. + "500": + description: empty error. + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + /subscribe: get: summary: Subscribe for events via WebSocket. @@ -601,6 +626,32 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" + /unsafe_flush_mempool: + get: + summary: Flush the mempool of all unconfirmed transactions + operationId: unsafe_flush_mempool + tags: + - Unsafe + description: | + Flush flushes out the mempool. It acquires a read-lock, fetches all the + transactions currently in the transaction store, removes each transaction + from the store and all indexes, and finally resets the cache. + + Note: flushing the mempool may leave the mempool in an inconsistent state. + responses: + "200": + description: empty response + content: + application/json: + schema: + $ref: "#/components/schemas/EmptyResponse" + "500": + description: empty error + content: + application/json: + schema: + $ref: "#/components/schemas/ErrorResponse" + +
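Since every route in this file is also served over plain HTTP, the new `/unsafe_flush_mempool` entry above can be exercised with a simple GET request. A minimal sketch in Go, assuming a local node with unsafe RPC routes enabled on the default port (the address and error handling are illustrative, not part of this change):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Assumes a node with unsafe RPC routes enabled on the default RPC port.
	resp, err := http.Get("http://127.0.0.1:26657/unsafe_flush_mempool")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}
```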
/blockchain: get: summary: "Get block headers (max: 20) for minHeight <= height <= maxHeight." @@ -837,7 +888,7 @@ paths: - Info description: | Get genesis document in a paginated/chunked format to make it - easier to iterate through larger gensis structures. + easier to iterate through larger genesis structures. parameters: - in: query name: chunkID @@ -1042,7 +1093,7 @@ paths: - Info responses: "200": - description: List of unconfirmed transactions + description: List of transactions content: application/json: schema: @@ -1132,10 +1183,10 @@ paths: tags: - Info description: | - Get a trasasction + Get a transaction responses: "200": - description: Get a transaction` + description: Get a transaction content: application/json: schema: @@ -1370,6 +1421,27 @@ components: remaining_time: type: string example: "0" + total_snapshots: + type: string + example: "10" + chunk_process_avg_time: + type: string + example: "1000000000" + snapshot_height: + type: string + example: "1262196" + snapshot_chunks_count: + type: string + example: "10" + snapshot_chunks_total: + type: string + example: "100" + backfilled_blocks: + type: string + example: "10" + backfill_blocks_total: + type: string + example: "100" ValidatorInfo: type: object properties: @@ -1476,16 +1548,12 @@ components: Peer: type: object properties: - node_info: - $ref: "#/components/schemas/NodeInfo" - is_outbound: - type: boolean - example: true - connection_status: - $ref: "#/components/schemas/ConnectionStatus" - remote_ip: + node_id: type: string - example: "95.179.155.35" + example: "" + url: + type: string + example: "@95.179.155.35:2385>" NetInfo: type: object properties: @@ -1946,14 +2014,14 @@ components: - "chunk" - "total" - "data" - properties: + properties: chunk: type: integer example: 0 total: type: integer example: 1 - data: + data: type: string example: "Z2VuZXNpcwo=" diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 259450540..189289b0a 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -6,15 +6,14 @@ import ( "os" "time" + abciclient "github.com/tendermint/tendermint/abci/client" abci "github.com/tendermint/tendermint/abci/types" - cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" - nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/proxy" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - core_grpc "github.com/tendermint/tendermint/rpc/grpc" + "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/rpc/coretypes" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" ) @@ -24,13 +23,14 @@ type Options struct { suppressStdout bool } -func waitForRPC(ctx context.Context, conf *cfg.Config) { +// waitForRPC connects to the RPC service and blocks until a /status call succeeds.
+func waitForRPC(ctx context.Context, conf *config.Config) { laddr := conf.RPC.ListenAddress client, err := rpcclient.New(laddr) if err != nil { panic(err) } - result := new(ctypes.ResultStatus) + result := new(coretypes.ResultStatus) for { _, err := client.Call(ctx, "status", map[string]interface{}{}, result) if err == nil { @@ -42,16 +42,6 @@ } } -func waitForGRPC(ctx context.Context, conf *cfg.Config) { - client := GetGRPCClient(conf) - for { - _, err := client.Ping(ctx, &core_grpc.RequestPing{}) - if err == nil { - return - } - } -} - func randPort() int { port, err := tmnet.GetFreePort() if err != nil { @@ -60,33 +50,28 @@ return port } -func makeAddrs() (string, string, string) { - return fmt.Sprintf("tcp://127.0.0.1:%d", randPort()), - fmt.Sprintf("tcp://127.0.0.1:%d", randPort()), - fmt.Sprintf("tcp://127.0.0.1:%d", randPort()) +// makeAddrs constructs local listener addresses for node services. This +// implementation uses random ports so test instances can run concurrently. +func makeAddrs() (p2pAddr, rpcAddr string) { + const addrTemplate = "tcp://127.0.0.1:%d" + return fmt.Sprintf(addrTemplate, randPort()), fmt.Sprintf(addrTemplate, randPort()) } -func CreateConfig(testName string) *cfg.Config { - c := cfg.ResetTestRoot(testName) +func CreateConfig(testName string) *config.Config { + c := config.ResetTestRoot(testName) - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc + p2pAddr, rpcAddr := makeAddrs() + c.P2P.ListenAddress = p2pAddr + c.RPC.ListenAddress = rpcAddr + c.Consensus.WalPath = "rpc-test" c.RPC.CORSAllowedOrigins = []string{"https://tendermint.com/"} - c.RPC.GRPCListenAddress = grpc return c } -func GetGRPCClient(conf *cfg.Config) core_grpc.BroadcastAPIClient { - grpcAddr := conf.RPC.GRPCListenAddress - return core_grpc.StartGRPCClient(grpcAddr) -} - type ServiceCloser func(context.Context) error func StartTendermint(ctx context.Context, - conf *cfg.Config, + conf *config.Config, app abci.Application, opts ...func(*Options)) (service.Service, ServiceCloser, error) { @@ -100,30 +85,28 @@ } else { logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) } - papp := proxy.NewLocalClientCreator(app) - node, err := nm.New(conf, logger, papp, nil) + papp := abciclient.NewLocalCreator(app) + tmNode, err := node.New(conf, logger, papp, nil) if err != nil { return nil, func(_ context.Context) error { return nil }, err } - err = node.Start() + err = tmNode.Start() if err != nil { return nil, func(_ context.Context) error { return nil }, err } - // wait for rpc waitForRPC(ctx, conf) - waitForGRPC(ctx, conf) if !nodeOpts.suppressStdout { fmt.Println("Tendermint running!") } - return node, func(ctx context.Context) error { - if err := node.Stop(); err != nil { logger.Error("Error when trying to stop node", "err", err) } - node.Wait() + return tmNode, func(ctx context.Context) error { + if err := tmNode.Stop(); err != nil { + logger.Error("Error when trying to stop node", "err", err) + } + tmNode.Wait() os.RemoveAll(conf.RootDir) return nil }, nil
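Call sites of these helpers are unaffected beyond the removed gRPC wait: a test builds a config, starts the node, and defers the returned closer. A minimal sketch, assuming the package is imported as `rpctest` and using the kvstore example app (both assumptions, not shown in this diff):

```go
package rpctest_test

import (
	"context"
	"testing"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	rpctest "github.com/tendermint/tendermint/rpc/test"
)

func TestStartAndStop(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	conf := rpctest.CreateConfig(t.Name())

	// StartTendermint returns once waitForRPC sees a successful /status call.
	_, closer, err := rpctest.StartTendermint(ctx, conf, kvstore.NewApplication())
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = closer(ctx) }()
}
```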
diff --git a/scripts/authors.sh b/scripts/authors.sh index 7aafb0127..49251242e 100755 --- a/scripts/authors.sh +++ b/scripts/authors.sh @@ -1,16 +1,16 @@ #! /bin/bash -# Usage: -# `./authors.sh` -# Print a list of all authors who have committed to develop since master. -# -# `./authors.sh <email>` -# Lookup the email address on Github and print the associated username +set -euo pipefail -author=$1 +ref=${1:-} -if [[ "$author" == "" ]]; then +if [[ ! -z "$ref" ]]; then - git log master..develop | grep Author | sort | uniq + git log master..$ref | grep Author | sort | uniq else - curl -s "https://api.github.com/search/users?q=$author+in%3Aemail&type=Users&utf8=%E2%9C%93" | jq .items[0].login +cat << EOF +Usage: + ./authors.sh <ref> + Print a list of all authors who have committed to the codebase since the supplied commit ref. +EOF fi + diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index d21dc6c44..6b60ac2fc 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -14,7 +14,7 @@ import ( "os" "strings" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/types" ) @@ -41,7 +41,7 @@ func main() { // because of the byte array in BlockPart // leading to unmarshal error: unexpected end of JSON input br := bufio.NewReaderSize(f, int(2*types.BlockPartSizeBytes)) - dec := cs.NewWALEncoder(walFile) + dec := consensus.NewWALEncoder(walFile) for { msgJSON, _, err := br.ReadLine() @@ -55,7 +55,7 @@ continue } - var msg cs.TimedWALMessage + var msg consensus.TimedWALMessage err = tmjson.Unmarshal(msgJSON, &msg) if err != nil { panic(fmt.Errorf("failed to unmarshal json: %v", err)) diff --git a/scripts/protocgen.sh b/scripts/protocgen.sh index 51b1cc6d3..1b84c2c56 100755 --- a/scripts/protocgen.sh +++ b/scripts/protocgen.sh @@ -5,5 +5,3 @@ set -eo pipefail buf generate --path proto/tendermint mv ./proto/tendermint/abci/types.pb.go ./abci/types - -mv ./proto/tendermint/rpc/grpc/types.pb.go ./rpc/grpc diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 886e5402f..5a5a0abac 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -12,7 +12,7 @@ import ( "io" "os" - cs "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/internal/consensus" tmjson "github.com/tendermint/tendermint/libs/json" ) @@ -28,7 +28,7 @@ func main() { } defer f.Close() - dec := cs.NewWALDecoder(f) + dec := consensus.NewWALDecoder(f) for { msg, err := dec.Decode() if err == io.EOF { @@ -48,7 +48,7 @@ } if err == nil { - if endMsg, ok := msg.Msg.(cs.EndHeightMessage); ok { + if endMsg, ok := msg.Msg.(consensus.EndHeightMessage); ok { _, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", endMsg.Height))) } } diff --git a/state/indexer/sink/psql/psql.go b/state/indexer/sink/psql/psql.go deleted file mode 100644 index efb539e0b..000000000 --- a/state/indexer/sink/psql/psql.go +++ /dev/null @@ -1,211 +0,0 @@ -package psql - -import ( - "context" - "database/sql" - "errors" - "fmt" - "time" - - sq "github.com/Masterminds/squirrel" - proto "github.com/gogo/protobuf/proto" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" -) - -var _ indexer.EventSink = (*EventSink)(nil) - -const ( - TableEventBlock = "block_events" - TableEventTx = "tx_events" - TableResultTx = "tx_results" - DriverName = "postgres" -) - -// EventSink is an indexer backend providing the tx/block index services.
-type EventSink struct { - store *sql.DB - chainID string -} - -func NewEventSink(connStr string, chainID string) (indexer.EventSink, *sql.DB, error) { - db, err := sql.Open(DriverName, connStr) - if err != nil { - return nil, nil, err - } - - return &EventSink{ - store: db, - chainID: chainID, - }, db, nil -} - -func (es *EventSink) Type() indexer.EventSinkType { - return indexer.PSQL -} - -func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error { - sqlStmt := sq. - Insert(TableEventBlock). - Columns("key", "value", "height", "type", "created_at", "chain_id"). - PlaceholderFormat(sq.Dollar). - Suffix("ON CONFLICT (key,height)"). - Suffix("DO NOTHING") - - ts := time.Now() - // index the reserved block height index - sqlStmt = sqlStmt. - Values(types.BlockHeightKey, fmt.Sprint(h.Header.Height), h.Header.Height, "", ts, es.chainID) - - // index begin_block events - sqlStmt, err := indexBlockEvents( - sqlStmt, h.ResultBeginBlock.Events, types.EventTypeBeginBlock, h.Header.Height, ts, es.chainID) - if err != nil { - return err - } - - // index end_block events - sqlStmt, err = indexBlockEvents( - sqlStmt, h.ResultEndBlock.Events, types.EventTypeEndBlock, h.Header.Height, ts, es.chainID) - if err != nil { - return err - } - - _, err = sqlStmt.RunWith(es.store).Exec() - return err -} - -func (es *EventSink) IndexTxEvents(txr []*abci.TxResult) error { - // index the tx result - var txid uint32 - sqlStmtTxResult := sq. - Insert(TableResultTx). - Columns("tx_result", "created_at"). - PlaceholderFormat(sq.Dollar). - RunWith(es.store). - Suffix("ON CONFLICT (tx_result)"). - Suffix("DO NOTHING"). - Suffix("RETURNING \"id\"") - - sqlStmtEvents := sq. - Insert(TableEventTx). - Columns("key", "value", "height", "hash", "tx_result_id", "created_at", "chain_id"). - PlaceholderFormat(sq.Dollar). - Suffix("ON CONFLICT (key,hash)"). - Suffix("DO NOTHING") - - ts := time.Now() - for _, tx := range txr { - txBz, err := proto.Marshal(tx) - if err != nil { - return err - } - - sqlStmtTxResult = sqlStmtTxResult.Values(txBz, ts) - - // execute sqlStmtTxResult db query and retrieve the txid - r, err := sqlStmtTxResult.Query() - if err != nil { - return err - } - defer r.Close() - - if !r.Next() { - return nil - } - - if err := r.Scan(&txid); err != nil { - return err - } - - // index the reserved height and hash indices - hash := fmt.Sprintf("%X", types.Tx(tx.Tx).Hash()) - - sqlStmtEvents = sqlStmtEvents.Values(types.TxHashKey, hash, tx.Height, hash, txid, ts, es.chainID) - sqlStmtEvents = sqlStmtEvents.Values(types.TxHeightKey, fmt.Sprint(tx.Height), tx.Height, hash, txid, ts, es.chainID) - for _, event := range tx.Result.Events { - // only index events with a non-empty type - if len(event.Type) == 0 { - continue - } - - for _, attr := range event.Attributes { - if len(attr.Key) == 0 { - continue - } - - // index if `index: true` is set - compositeTag := fmt.Sprintf("%s.%s", event.Type, attr.Key) - - // ensure event does not conflict with a reserved prefix key - if compositeTag == types.TxHashKey || compositeTag == types.TxHeightKey { - return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeTag) - } - - if attr.GetIndex() { - sqlStmtEvents = sqlStmtEvents.Values(compositeTag, attr.Value, tx.Height, hash, txid, ts, es.chainID) - } - } - } - } - - // execute sqlStmtEvents db query... 
- _, err := sqlStmtEvents.RunWith(es.store).Exec() - return err -} - -func (es *EventSink) SearchBlockEvents(ctx context.Context, q *query.Query) ([]int64, error) { - return nil, errors.New("block search is not supported via the postgres event sink") -} - -func (es *EventSink) SearchTxEvents(ctx context.Context, q *query.Query) ([]*abci.TxResult, error) { - return nil, errors.New("tx search is not supported via the postgres event sink") -} - -func (es *EventSink) GetTxByHash(hash []byte) (*abci.TxResult, error) { - return nil, errors.New("getTxByHash is not supported via the postgres event sink") -} - -func (es *EventSink) HasBlock(h int64) (bool, error) { - return false, errors.New("hasBlock is not supported via the postgres event sink") -} - -func indexBlockEvents( - sqlStmt sq.InsertBuilder, - events []abci.Event, - ty string, - height int64, - ts time.Time, - chainID string, -) (sq.InsertBuilder, error) { - for _, event := range events { - // only index events with a non-empty type - if len(event.Type) == 0 { - continue - } - - for _, attr := range event.Attributes { - if len(attr.Key) == 0 { - continue - } - - // index iff the event specified index:true and it's not a reserved event - compositeKey := fmt.Sprintf("%s.%s", event.Type, attr.Key) - if compositeKey == types.BlockHeightKey { - return sqlStmt, fmt.Errorf( - "event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) - } - - if attr.GetIndex() { - sqlStmt = sqlStmt.Values(compositeKey, attr.Value, height, ty, ts, chainID) - } - } - } - return sqlStmt, nil -} - -func (es *EventSink) Stop() error { - return es.store.Close() -} diff --git a/state/indexer/sink/psql/psql_test.go b/state/indexer/sink/psql/psql_test.go deleted file mode 100644 index 0df773a53..000000000 --- a/state/indexer/sink/psql/psql_test.go +++ /dev/null @@ -1,371 +0,0 @@ -package psql - -import ( - "context" - "database/sql" - "errors" - "fmt" - "io/ioutil" - "os" - "testing" - "time" - - sq "github.com/Masterminds/squirrel" - schema "github.com/adlio/schema" - proto "github.com/gogo/protobuf/proto" - _ "github.com/lib/pq" - dockertest "github.com/ory/dockertest" - "github.com/ory/dockertest/docker" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" -) - -var db *sql.DB -var resource *dockertest.Resource -var chainID = "test-chainID" - -var ( - user = "postgres" - password = "secret" - port = "5432" - dsn = "postgres://%s:%s@localhost:%s/%s?sslmode=disable" - dbName = "postgres" -) - -func TestType(t *testing.T) { - pool, err := setupDB(t) - require.NoError(t, err) - - psqlSink := &EventSink{store: db, chainID: chainID} - assert.Equal(t, indexer.PSQL, psqlSink.Type()) - require.NoError(t, teardown(t, pool)) -} - -func TestBlockFuncs(t *testing.T) { - pool, err := setupDB(t) - require.NoError(t, err) - - indexer := &EventSink{store: db, chainID: chainID} - require.NoError(t, indexer.IndexBlockEvents(getTestBlockHeader())) - - r, err := verifyBlock(1) - assert.True(t, r) - require.NoError(t, err) - - r, err = verifyBlock(2) - assert.False(t, r) - require.NoError(t, err) - - r, err = indexer.HasBlock(1) - assert.False(t, r) - assert.Equal(t, errors.New("hasBlock is not supported via the postgres event sink"), err) - - r, err = indexer.HasBlock(2) - assert.False(t, r) - assert.Equal(t, errors.New("hasBlock is not supported via the postgres event sink"), err) - 
- r2, err := indexer.SearchBlockEvents(context.TODO(), nil) - assert.Nil(t, r2) - assert.Equal(t, errors.New("block search is not supported via the postgres event sink"), err) - - require.NoError(t, verifyTimeStamp(TableEventBlock)) - - // try to insert the duplicate block events. - err = indexer.IndexBlockEvents(getTestBlockHeader()) - require.NoError(t, err) - - require.NoError(t, teardown(t, pool)) -} - -func TestTxFuncs(t *testing.T) { - pool, err := setupDB(t) - assert.Nil(t, err) - - indexer := &EventSink{store: db, chainID: chainID} - - txResult := txResultWithEvents([]abci.Event{ - {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, - {Type: "account", Attributes: []abci.EventAttribute{{Key: "owner", Value: "Ivan", Index: true}}}, - {Type: "", Attributes: []abci.EventAttribute{{Key: "not_allowed", Value: "Vlad", Index: true}}}, - }) - err = indexer.IndexTxEvents([]*abci.TxResult{txResult}) - require.NoError(t, err) - - tx, err := verifyTx(types.Tx(txResult.Tx).Hash()) - require.NoError(t, err) - assert.Equal(t, txResult, tx) - - require.NoError(t, verifyTimeStamp(TableEventTx)) - require.NoError(t, verifyTimeStamp(TableResultTx)) - - tx, err = indexer.GetTxByHash(types.Tx(txResult.Tx).Hash()) - assert.Nil(t, tx) - assert.Equal(t, errors.New("getTxByHash is not supported via the postgres event sink"), err) - - r2, err := indexer.SearchTxEvents(context.TODO(), nil) - assert.Nil(t, r2) - assert.Equal(t, errors.New("tx search is not supported via the postgres event sink"), err) - - // try to insert the duplicate tx events. - err = indexer.IndexTxEvents([]*abci.TxResult{txResult}) - require.NoError(t, err) - - assert.Nil(t, teardown(t, pool)) -} - -func TestStop(t *testing.T) { - pool, err := setupDB(t) - require.NoError(t, err) - - indexer := &EventSink{store: db} - require.NoError(t, indexer.Stop()) - - defer db.Close() - require.NoError(t, pool.Purge(resource)) -} - -func getTestBlockHeader() types.EventDataNewBlockHeader { - return types.EventDataNewBlockHeader{ - Header: types.Header{Height: 1}, - ResultBeginBlock: abci.ResponseBeginBlock{ - Events: []abci.Event{ - { - Type: "begin_event", - Attributes: []abci.EventAttribute{ - { - Key: "proposer", - Value: "FCAA001", - Index: true, - }, - }, - }, - }, - }, - ResultEndBlock: abci.ResponseEndBlock{ - Events: []abci.Event{ - { - Type: "end_event", - Attributes: []abci.EventAttribute{ - { - Key: "foo", - Value: "100", - Index: true, - }, - }, - }, - }, - }, - } -} - -func readSchema() ([]*schema.Migration, error) { - - filename := "schema.sql" - contents, err := ioutil.ReadFile(filename) - if err != nil { - return nil, fmt.Errorf("failed to read sql file from '%s': %w", filename, err) - } - - mg := &schema.Migration{} - mg.ID = time.Now().Local().String() + " db schema" - mg.Script = string(contents) - return append([]*schema.Migration{}, mg), nil -} - -func resetDB(t *testing.T) { - q := "DROP TABLE IF EXISTS block_events,tx_events,tx_results" - _, err := db.Exec(q) - - require.NoError(t, err) - - q = "DROP TYPE IF EXISTS block_event_type" - _, err = db.Exec(q) - require.NoError(t, err) -} - -func txResultWithEvents(events []abci.Event) *abci.TxResult { - tx := types.Tx("HELLO WORLD") - return &abci.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: abci.ResponseDeliverTx{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Events: events, - }, - } -} - -func verifyTx(hash []byte) (*abci.TxResult, error) { - join := fmt.Sprintf("%s ON %s.id = tx_result_id", TableEventTx, 
TableResultTx) - sqlStmt := sq. - Select("tx_result", fmt.Sprintf("%s.id", TableResultTx), "tx_result_id", "hash", "chain_id"). - Distinct().From(TableResultTx). - InnerJoin(join). - Where(fmt.Sprintf("hash = $1 AND chain_id = '%s'", chainID), fmt.Sprintf("%X", hash)) - - rows, err := sqlStmt.RunWith(db).Query() - if err != nil { - return nil, err - } - - defer rows.Close() - - if rows.Next() { - var txResult []byte - var txResultID, txid int - var h, cid string - err = rows.Scan(&txResult, &txResultID, &txid, &h, &cid) - if err != nil { - return nil, nil - } - - msg := new(abci.TxResult) - err = proto.Unmarshal(txResult, msg) - if err != nil { - return nil, err - } - - return msg, err - } - - // No result - return nil, nil -} - -func verifyTimeStamp(tb string) error { - - // We assume the tx indexing time would not exceed 2 second from now - sqlStmt := sq. - Select(fmt.Sprintf("%s.created_at", tb)). - Distinct().From(tb). - Where(fmt.Sprintf("%s.created_at >= $1", tb), time.Now().Add(-2*time.Second)) - - rows, err := sqlStmt.RunWith(db).Query() - if err != nil { - return err - } - - defer rows.Close() - - if rows.Next() { - var ts string - return rows.Scan(&ts) - } - - return errors.New("no result") -} - -func verifyBlock(h int64) (bool, error) { - sqlStmt := sq. - Select("height"). - Distinct(). - From(TableEventBlock). - Where(fmt.Sprintf("height = %d", h)) - rows, err := sqlStmt.RunWith(db).Query() - if err != nil { - return false, err - } - - defer rows.Close() - - if !rows.Next() { - return false, nil - } - - sqlStmt = sq. - Select("type, height", "chain_id"). - Distinct(). - From(TableEventBlock). - Where(fmt.Sprintf("height = %d AND type = '%s' AND chain_id = '%s'", h, types.EventTypeBeginBlock, chainID)) - - rows, err = sqlStmt.RunWith(db).Query() - if err != nil { - return false, err - } - defer rows.Close() - - if !rows.Next() { - return false, nil - } - - sqlStmt = sq. - Select("type, height"). - Distinct(). - From(TableEventBlock). 
- Where(fmt.Sprintf("height = %d AND type = '%s'", h, types.EventTypeEndBlock)) - rows, err = sqlStmt.RunWith(db).Query() - - if err != nil { - return false, err - } - defer rows.Close() - - return rows.Next(), nil -} - -func setupDB(t *testing.T) (*dockertest.Pool, error) { - t.Helper() - pool, err := dockertest.NewPool(os.Getenv("DOCKER_URL")) - - require.NoError(t, err) - - resource, err = pool.RunWithOptions(&dockertest.RunOptions{ - Repository: DriverName, - Tag: "13", - Env: []string{ - "POSTGRES_USER=" + user, - "POSTGRES_PASSWORD=" + password, - "POSTGRES_DB=" + dbName, - "listen_addresses = '*'", - }, - ExposedPorts: []string{port}, - }, func(config *docker.HostConfig) { - // set AutoRemove to true so that stopped container goes away by itself - config.AutoRemove = true - config.RestartPolicy = docker.RestartPolicy{ - Name: "no", - } - }) - - require.NoError(t, err) - - // Set the container to expire in a minute to avoid orphaned containers - // hanging around - _ = resource.Expire(60) - - conn := fmt.Sprintf(dsn, user, password, resource.GetPort(port+"/tcp"), dbName) - - if err = pool.Retry(func() error { - var err error - - _, db, err = NewEventSink(conn, chainID) - - if err != nil { - return err - } - - return db.Ping() - }); err != nil { - require.NoError(t, err) - } - - resetDB(t) - - sm, err := readSchema() - assert.Nil(t, err) - assert.Nil(t, schema.NewMigrator().Apply(db, sm)) - return pool, nil -} - -func teardown(t *testing.T, pool *dockertest.Pool) error { - t.Helper() - // When you're done, kill and remove the container - assert.Nil(t, pool.Purge(resource)) - return db.Close() -} diff --git a/state/indexer/sink/psql/schema.sql b/state/indexer/sink/psql/schema.sql deleted file mode 100644 index 0563136e2..000000000 --- a/state/indexer/sink/psql/schema.sql +++ /dev/null @@ -1,32 +0,0 @@ -CREATE TYPE block_event_type AS ENUM ('begin_block', 'end_block', ''); -CREATE TABLE block_events ( - id SERIAL PRIMARY KEY, - key VARCHAR NOT NULL, - value VARCHAR NOT NULL, - height INTEGER NOT NULL, - type block_event_type, - created_at TIMESTAMPTZ NOT NULL, - chain_id VARCHAR NOT NULL, - UNIQUE (key, height) -); -CREATE TABLE tx_results ( - id SERIAL PRIMARY KEY, - tx_result BYTEA NOT NULL, - created_at TIMESTAMPTZ NOT NULL, - UNIQUE (tx_result) -); -CREATE TABLE tx_events ( - id SERIAL PRIMARY KEY, - key VARCHAR NOT NULL, - value VARCHAR NOT NULL, - height INTEGER NOT NULL, - hash VARCHAR NOT NULL, - tx_result_id SERIAL, - created_at TIMESTAMPTZ NOT NULL, - chain_id VARCHAR NOT NULL, - UNIQUE (hash, key), - FOREIGN KEY (tx_result_id) REFERENCES tx_results(id) ON DELETE CASCADE -); -CREATE INDEX idx_block_events_key_value ON block_events(key, value); -CREATE INDEX idx_tx_events_key_value ON tx_events(key, value); -CREATE INDEX idx_tx_events_hash ON tx_events(hash); diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go deleted file mode 100644 index 73022aaf8..000000000 --- a/test/app/grpc_client.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "encoding/hex" - "fmt" - "os" - - "context" - - tmjson "github.com/tendermint/tendermint/libs/json" - coregrpc "github.com/tendermint/tendermint/rpc/grpc" -) - -var grpcAddr = "tcp://localhost:36656" - -func main() { - args := os.Args - if len(args) == 1 { - fmt.Println("Must enter a transaction to send (hex)") - os.Exit(1) - } - tx := args[1] - txBytes, err := hex.DecodeString(tx) - if err != nil { - fmt.Println("Invalid hex", err) - os.Exit(1) - } - - clientGRPC := coregrpc.StartGRPCClient(grpcAddr) - res, err := 
clientGRPC.BroadcastTx(context.Background(), &coregrpc.RequestBroadcastTx{Tx: txBytes}) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - bz, err := tmjson.Marshal(res) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - fmt.Println(string(bz)) -} diff --git a/test/e2e/Makefile b/test/e2e/Makefile index 38ce809e6..23cf4d039 100644 --- a/test/e2e/Makefile +++ b/test/e2e/Makefile @@ -1,13 +1,10 @@ -all: docker generator runner +all: docker generator runner tests docker: docker build --tag tendermint/e2e-node -f docker/Dockerfile ../.. -# We need to build support for database backends into the app in -# order to build a binary with a Tendermint node in it (for built-in -# ABCI testing). -app: - go build -o build/app -tags badgerdb,boltdb,cleveldb,rocksdb ./app +node: + go build -o build/node -tags badgerdb,boltdb,cleveldb,rocksdb ./node generator: go build -o build/generator ./generator @@ -15,4 +12,7 @@ generator: runner: go build -o build/runner ./runner -.PHONY: all app docker generator runner +tests: + go test -o build/tests ./tests + +.PHONY: all docker generator runner tests node diff --git a/test/e2e/README.md b/test/e2e/README.md index d737120c1..00bce5ad8 100644 --- a/test/e2e/README.md +++ b/test/e2e/README.md @@ -142,10 +142,42 @@ Docker does not enable IPv6 by default. To do so, enter the following in } ``` -## Benchmarking testnets +## Benchmarking Testnets It is also possible to run a simple benchmark on a testnet. This is done through the `benchmark` command. This manages the entire process: setting up the environment, starting the test net, waiting for a considerable amount of blocks to be used (currently 100), and then returning the following metrics from the sample of the blockchain: - Average time to produce a block - Standard deviation of producing a block - Minimum and maximum time to produce a block + +## Running Individual Nodes + +The E2E test harness is designed to run several nodes of varying configurations within Docker. It is also possible to run a single node, for example when running larger, geographically-dispersed testnets. To run a single node, you can either run: + +**Built-in** + +```bash +make node +tendermint init validator +TMHOME=$HOME/.tendermint ./build/node ./node/built-in.toml +``` + +To make things simpler, the e2e application can also be run in the tendermint binary +by running + +```bash +tendermint start --proxy-app e2e +``` + +However, this won't offer the same level of configurability of the application. + +**Socket** + +```bash +make node +tendermint init validator +tendermint start +./build/node ./node.socket.toml +``` + +Check `node/config.go` to see how the settings of the test application can be tweaked. \ No newline at end of file diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 26b10d32a..395c87024 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -1,4 +1,4 @@ -package main +package app import ( "bytes" @@ -11,6 +11,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -27,6 +28,55 @@ type Application struct { restoreChunks [][]byte } +// Config allows for the setting of high-level parameters for running the e2e Application. +// KeyType and ValidatorUpdates must be the same for all nodes running the same application.
+type Config struct { + // The directory in which state.json will be persisted. Usually $HOME/.tendermint/data + Dir string `toml:"dir"` + + // SnapshotInterval specifies the height interval at which the application + // will take state sync snapshots. Defaults to 0 (disabled). + SnapshotInterval uint64 `toml:"snapshot_interval"` + + // RetainBlocks specifies the number of recent blocks to retain. Defaults to + // 0, which retains all blocks. Must be greater than PersistInterval, + // SnapshotInterval and EvidenceAgeHeight. + RetainBlocks uint64 `toml:"retain_blocks"` + + // KeyType sets the curve that will be used by validators. + // Options are ed25519 & secp256k1. + KeyType string `toml:"key_type"` + + // PersistInterval specifies the height interval at which the application + // will persist state to disk. Defaults to 1 (every height), setting this to + // 0 disables state persistence. + PersistInterval uint64 `toml:"persist_interval"` + + // ValidatorUpdates is a map of heights to validator names and their power, + // and will be returned by the ABCI application. For example, the following + // changes the power of validator01 and validator02 at height 1000: + // + // [validator_update.1000] + // validator01 = 20 + // validator02 = 10 + // + // Specifying height 0 returns the validator update during InitChain. The + // application returns the validator updates as-is, i.e. removing a + // validator must be done by returning it with power 0, and any validators + // not specified are not changed. + // + // height <-> pubkey <-> voting power + ValidatorUpdates map[string]map[string]uint8 `toml:"validator_update"` +} + +func DefaultConfig(dir string) *Config { + return &Config{ + PersistInterval: 1, + SnapshotInterval: 100, + Dir: dir, + } +} + // NewApplication creates the application. func NewApplication(cfg *Config) (*Application, error) { state, err := NewState(filepath.Join(cfg.Dir, "state.json"), cfg.PersistInterval) @@ -67,6 +117,11 @@ func (app *Application) InitChain(req abci.RequestInitChain) abci.ResponseInitCh } resp := abci.ResponseInitChain{ AppHash: app.state.Hash, + ConsensusParams: &types.ConsensusParams{ + Version: &types.VersionParams{ + AppVersion: 1, + }, + }, } if resp.Validators, err = app.validatorUpdates(0); err != nil { panic(err) } @@ -134,7 +189,11 @@ func (app *Application) Commit() abci.ResponseCommit { if err != nil { panic(err) } - logger.Info("Created state sync snapshot", "height", snapshot.Height) + app.logger.Info("Created state sync snapshot", "height", snapshot.Height) + err = app.snapshots.Prune(maxSnapshotCount) + if err != nil { + app.logger.Error("Failed to prune snapshots", "err", err) + } } retainHeight := int64(0) if app.cfg.RetainBlocks > 0 { diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 4ddb7ecdc..6a9c0e0dc 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -1,5 +1,5 @@ // nolint: gosec -package main +package app import ( "encoding/json" @@ -16,6 +16,9 @@ const ( snapshotChunkSize = 1e6 + + // Keep only the most recent 10 snapshots. Older snapshots are pruned. + maxSnapshotCount = 10 ) // SnapshotStore stores state sync snapshots.
Snapshots are stored simply as @@ -105,6 +108,27 @@ func (s *SnapshotStore) Create(state *State) (abci.Snapshot, error) { return snapshot, nil } +// Prune removes old snapshots ensuring only the most recent n snapshots remain +func (s *SnapshotStore) Prune(n int) error { + s.Lock() + defer s.Unlock() + // snapshots are appended to the metadata struct, hence pruning removes from + // the front of the array + i := 0 + for ; i < len(s.metadata)-n; i++ { + h := s.metadata[i].Height + if err := os.Remove(filepath.Join(s.dir, fmt.Sprintf("%v.json", h))); err != nil { + return err + } + } + + // update metadata by removing the deleted snapshots + pruned := make([]abci.Snapshot, len(s.metadata[i:])) + copy(pruned, s.metadata[i:]) + s.metadata = pruned + return nil +} + // List lists available snapshots. func (s *SnapshotStore) List() ([]*abci.Snapshot, error) { s.RLock() diff --git a/test/e2e/app/state.go b/test/e2e/app/state.go index b34680c1b..441926453 100644 --- a/test/e2e/app/state.go +++ b/test/e2e/app/state.go @@ -1,5 +1,5 @@ //nolint: gosec -package main +package app import ( "crypto/sha256" diff --git a/test/e2e/docker/Dockerfile b/test/e2e/docker/Dockerfile index 68c7bc836..260df23f3 100644 --- a/test/e2e/docker/Dockerfile +++ b/test/e2e/docker/Dockerfile @@ -19,7 +19,7 @@ COPY . . RUN make build && cp build/tendermint /usr/bin/tendermint COPY test/e2e/docker/entrypoint* /usr/bin/ -RUN cd test/e2e && make app && cp build/app /usr/bin/app +RUN cd test/e2e && make node && cp build/node /usr/bin/app # Set up runtime directory. We don't use a separate runtime image since we need # e.g. leveldb and rocksdb which are already installed in the build image. diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index f699b1162..61b4bf7d3 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -15,8 +15,7 @@ var ( // separate testnet for each combination (Cartesian product) of options. testnetCombinations = map[string][]interface{}{ "topology": {"single", "quad", "large"}, - "p2p": {NewP2PMode, LegacyP2PMode, HybridP2PMode}, - "queueType": {"priority"}, // "fifo", "wdrr" + "queueType": {"priority"}, // "fifo" "initialHeight": {0, 1000}, "initialState": { map[string]string{}, @@ -26,24 +25,49 @@ var ( } // The following specify randomly chosen values for testnet nodes. 
- nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"} - nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin", "grpc"} - nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp", "grpc"} - // FIXME: v2 disabled due to flake - nodeBlockSyncs = uniformChoice{"v0"} // "v2" - nodeMempools = uniformChoice{"v0", "v1"} - nodeStateSyncs = uniformChoice{false, true} + nodeDatabases = weightedChoice{ + "goleveldb": 35, + "badgerdb": 35, + "boltdb": 15, + "rocksdb": 10, + "cleveldb": 5, + } + nodeABCIProtocols = weightedChoice{ + "builtin": 50, + "tcp": 20, + "grpc": 20, + "unix": 10, + } + nodePrivvalProtocols = weightedChoice{ + "file": 50, + "grpc": 20, + "tcp": 20, + "unix": 10, + } + nodeMempools = weightedChoice{ + "v0": 20, + "v1": 80, + } + nodeStateSyncs = weightedChoice{ + e2e.StateSyncDisabled: 10, + e2e.StateSyncP2P: 45, + e2e.StateSyncRPC: 45, + } nodePersistIntervals = uniformChoice{0, 1, 5} - nodeSnapshotIntervals = uniformChoice{0, 3} - nodeRetainBlocks = uniformChoice{0, int(e2e.EvidenceAgeHeight), int(e2e.EvidenceAgeHeight) + 5} - nodePerturbations = probSetChoice{ + nodeSnapshotIntervals = uniformChoice{0, 5} + nodeRetainBlocks = uniformChoice{ + 0, + 2 * int(e2e.EvidenceAgeHeight), + 4 * int(e2e.EvidenceAgeHeight), + } + nodePerturbations = probSetChoice{ "disconnect": 0.1, "pause": 0.1, "kill": 0.1, "restart": 0.1, } evidence = uniformChoice{0, 1, 10} - txSize = uniformChoice{1024, 10240} // either 1kb or 10kb + txSize = uniformChoice{1024, 4096} // either 1kb or 4kb ipv6 = uniformChoice{false, true} keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} ) @@ -51,12 +75,6 @@ var ( // Generate generates random testnets using the given RNG. func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { manifests := []e2e.Manifest{} - switch opts.P2P { - case NewP2PMode, LegacyP2PMode, HybridP2PMode: - testnetCombinations["p2p"] = []interface{}{opts.P2P} - default: - testnetCombinations["p2p"] = []interface{}{NewP2PMode, LegacyP2PMode, HybridP2PMode} - } for _, opt := range combinations(testnetCombinations) { manifest, err := generateTestnet(r, opt) @@ -64,34 +82,33 @@ func Generate(r *rand.Rand, opts Options) ([]e2e.Manifest, error) { return nil, err } - if len(manifest.Nodes) == 1 { - if opt["p2p"] == HybridP2PMode { - continue - } + if len(manifest.Nodes) < opts.MinNetworkSize { + continue } + + if opts.MaxNetworkSize > 0 && len(manifest.Nodes) >= opts.MaxNetworkSize { + continue + } + manifests = append(manifests, manifest) } + return manifests, nil } type Options struct { - P2P P2PMode + MinNetworkSize int + MaxNetworkSize int + NumGroups int + Directory string + Reverse bool } -type P2PMode string - -const ( - NewP2PMode P2PMode = "new" - LegacyP2PMode P2PMode = "legacy" - HybridP2PMode P2PMode = "hybrid" - // mixed means that all combination are generated - MixedP2PMode P2PMode = "mixed" -) - // generateTestnet generates a single testnet with the given options. 
func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, error) { manifest := e2e.Manifest{ IPv6: ipv6.Choose(r).(bool), + ABCIProtocol: nodeABCIProtocols.Choose(r), InitialHeight: int64(opt["initialHeight"].(int)), InitialState: opt["initialState"].(map[string]string), Validators: &map[string]int64{}, @@ -103,20 +120,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er TxSize: int64(txSize.Choose(r).(int)), } - var p2pNodeFactor int - - switch opt["p2p"].(P2PMode) { - case NewP2PMode: - manifest.DisableLegacyP2P = true - case LegacyP2PMode: - manifest.DisableLegacyP2P = false - case HybridP2PMode: - manifest.DisableLegacyP2P = false - p2pNodeFactor = 2 - default: - return manifest, fmt.Errorf("unknown p2p mode %s", opt["p2p"]) - } - var numSeeds, numValidators, numFulls, numLightClients int switch opt["topology"].(string) { case "single": @@ -125,8 +128,8 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er numValidators = 4 case "large": // FIXME Networks are kept small since large ones use too much CPU. - numSeeds = r.Intn(2) - numLightClients = r.Intn(3) + numSeeds = r.Intn(1) + numLightClients = r.Intn(2) numValidators = 4 + r.Intn(4) numFulls = r.Intn(4) default: @@ -135,17 +138,12 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // First we generate seed nodes, starting at the initial height. for i := 1; i <= numSeeds; i++ { - node := generateNode(r, e2e.ModeSeed, 0, manifest.InitialHeight, false) - - if p2pNodeFactor == 0 { - node.DisableLegacyP2P = manifest.DisableLegacyP2P - } else if p2pNodeFactor%i == 0 { - node.DisableLegacyP2P = !manifest.DisableLegacyP2P - } - + node := generateNode(r, manifest, e2e.ModeSeed, 0, false) manifest.Nodes[fmt.Sprintf("seed%02d", i)] = node } + var numSyncingNodes = 0 + // Next, we generate validators. We make sure a BFT quorum of validators start // at the initial height, and that we have two archive nodes. We also set up // the initial validator set, and validator set updates for delayed nodes. @@ -153,19 +151,13 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er quorum := numValidators*2/3 + 1 for i := 1; i <= numValidators; i++ { startAt := int64(0) - if i > quorum { + if i > quorum && numSyncingNodes < 2 && r.Float64() >= 0.25 { + numSyncingNodes++ startAt = nextStartAt nextStartAt += 5 } name := fmt.Sprintf("validator%02d", i) - node := generateNode( - r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2) - - if p2pNodeFactor == 0 { - node.DisableLegacyP2P = manifest.DisableLegacyP2P - } else if p2pNodeFactor%i == 0 { - node.DisableLegacyP2P = !manifest.DisableLegacyP2P - } + node := generateNode(r, manifest, e2e.ModeValidator, startAt, i <= 2) manifest.Nodes[name] = node @@ -191,17 +183,13 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // Finally, we generate random full nodes. 
for i := 1; i <= numFulls; i++ { startAt := int64(0) - if r.Float64() >= 0.5 { + if numSyncingNodes < 2 && r.Float64() >= 0.5 { + numSyncingNodes++ startAt = nextStartAt nextStartAt += 5 } - node := generateNode(r, e2e.ModeFull, startAt, manifest.InitialHeight, false) + node := generateNode(r, manifest, e2e.ModeFull, startAt, false) - if p2pNodeFactor == 0 { - node.DisableLegacyP2P = manifest.DisableLegacyP2P - } else if p2pNodeFactor%i == 0 { - node.DisableLegacyP2P = !manifest.DisableLegacyP2P - } manifest.Nodes[fmt.Sprintf("full%02d", i)] = node } @@ -242,19 +230,32 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er } }) for i, name := range peerNames { - if len(seedNames) > 0 && (i == 0 || r.Float64() >= 0.5) { + // If there are seeds, this node has state sync disabled, and it + // is either the first peer in the sort order or part of a random + // half of the remaining peers, have it use one of the seed nodes; + // otherwise, roughly half of the remaining peers get at least two + // persistent peers. + + if len(seedNames) > 0 && + manifest.Nodes[name].StateSync == e2e.StateSyncDisabled && + (i == 0 || r.Float64() >= 0.5) { + + // choose one of the seeds + manifest.Nodes[name].Seeds = uniformSetChoice(seedNames).Choose(r) - } else if i > 0 { - manifest.Nodes[name].PersistentPeers = uniformSetChoice(peerNames[:i]).Choose(r) + } else if i > 1 && r.Float64() >= 0.5 { + peers := uniformSetChoice(peerNames[:i]) + manifest.Nodes[name].PersistentPeers = peers.ChooseAtLeast(r, 2) } } // lastly, set up the light clients for i := 1; i <= numLightClients; i++ { startAt := manifest.InitialHeight + 5 + + node := generateLightNode(r, startAt+(5*int64(i)), lightProviders) + + manifest.Nodes[fmt.Sprintf("light%02d", i)] = node + } return manifest, nil @@ -265,23 +266,36 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er // here, since we need to know the overall network topology and startup // sequencing. func generateNode( - r *rand.Rand, mode e2e.Mode, startAt int64, initialHeight int64, forceArchive bool, + r *rand.Rand, + manifest e2e.Manifest, + mode e2e.Mode, + startAt int64, + forceArchive bool, ) *e2e.ManifestNode { node := e2e.ManifestNode{ Mode: string(mode), StartAt: startAt, - Database: nodeDatabases.Choose(r).(string), - ABCIProtocol: nodeABCIProtocols.Choose(r).(string), - PrivvalProtocol: nodePrivvalProtocols.Choose(r).(string), - BlockSync: nodeBlockSyncs.Choose(r).(string), - Mempool: nodeMempools.Choose(r).(string), - StateSync: nodeStateSyncs.Choose(r).(bool) && startAt > 0, + Database: nodeDatabases.Choose(r), + PrivvalProtocol: nodePrivvalProtocols.Choose(r), + Mempool: nodeMempools.Choose(r), + StateSync: e2e.StateSyncDisabled, PersistInterval: ptrUint64(uint64(nodePersistIntervals.Choose(r).(int))), SnapshotInterval: uint64(nodeSnapshotIntervals.Choose(r).(int)), RetainBlocks: uint64(nodeRetainBlocks.Choose(r).(int)), Perturb: nodePerturbations.Choose(r), } + if startAt > 0 { + node.StateSync = nodeStateSyncs.Choose(r) + if manifest.InitialHeight-startAt <= 5 && node.StateSync == e2e.StateSyncDisabled { + // avoid needing to block sync more than five total blocks. + node.StateSync = uniformSetChoice([]string{ + e2e.StateSyncP2P, + e2e.StateSyncRPC, + }).Choose(r)[0] + } + } + // If this node is forced to be an archive node, retain all blocks and // enable state sync snapshotting.
if forceArchive { @@ -310,10 +324,6 @@ func generateNode( } } - if node.StateSync { - node.BlockSync = "v0" - } - return &node } @@ -321,8 +331,7 @@ func generateLightNode(r *rand.Rand, startAt int64, providers []string) *e2e.Man return &e2e.ManifestNode{ Mode: string(e2e.ModeLight), StartAt: startAt, - Database: nodeDatabases.Choose(r).(string), - ABCIProtocol: "builtin", + Database: nodeDatabases.Choose(r), PersistInterval: ptrUint64(0), PersistentPeers: providers, } diff --git a/test/e2e/generator/generate_test.go b/test/e2e/generator/generate_test.go new file mode 100644 index 000000000..79a20f27e --- /dev/null +++ b/test/e2e/generator/generate_test.go @@ -0,0 +1,43 @@ +package main + +import ( + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/require" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" +) + +func TestGenerator(t *testing.T) { + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{}) + require.NoError(t, err) + require.True(t, len(manifests) >= 24, "insufficient combinations %d", len(manifests)) + + // this just means that the numbers reported by the test + // failures map to the test cases that you'd see locally. + e2e.SortManifests(manifests, false /* ascending */) + + for idx, m := range manifests { + t.Run(fmt.Sprintf("Case%04d", idx), func(t *testing.T) { + numStateSyncs := 0 + for name, node := range m.Nodes { + if node.StateSync != e2e.StateSyncDisabled { + numStateSyncs++ + } + t.Run(name, func(t *testing.T) { + if node.StartAt > m.InitialHeight+5 && !node.Stateless() { + require.NotEqual(t, node.StateSync, e2e.StateSyncDisabled) + } + if node.StateSync != e2e.StateSyncDisabled { + require.Zero(t, node.Seeds, node.StateSync) + require.True(t, len(node.PersistentPeers) >= 2 || len(node.PersistentPeers) == 0, + "peers: %v", node.PersistentPeers) + } + + }) + } + require.True(t, numStateSyncs <= 2) + }) + } +} diff --git a/test/e2e/generator/main.go b/test/e2e/generator/main.go index f353241fc..38f36d0da 100644 --- a/test/e2e/generator/main.go +++ b/test/e2e/generator/main.go @@ -3,7 +3,6 @@ package main import ( "fmt" - "math" "math/rand" "os" "path/filepath" @@ -11,6 +10,7 @@ import ( "github.com/spf13/cobra" "github.com/tendermint/tendermint/libs/log" + e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) const ( @@ -26,6 +26,7 @@ func main() { // CLI is the Cobra-based command-line interface. type CLI struct { root *cobra.Command + opts Options } // NewCLI sets up the CLI. 
@@ -37,69 +38,54 @@ func NewCLI() *CLI { SilenceUsage: true, SilenceErrors: true, // we'll output them ourselves in Run() RunE: func(cmd *cobra.Command, args []string) error { - dir, err := cmd.Flags().GetString("dir") - if err != nil { - return err - } - groups, err := cmd.Flags().GetInt("groups") - if err != nil { - return err - } - p2pMode, err := cmd.Flags().GetString("p2p") - if err != nil { - return err - } - var opts Options - switch mode := P2PMode(p2pMode); mode { - case NewP2PMode, LegacyP2PMode, HybridP2PMode, MixedP2PMode: - opts = Options{P2P: mode} - default: - return fmt.Errorf("p2p mode must be either new, legacy, hybrid or mixed got %s", p2pMode) - } - - return cli.generate(dir, groups, opts) + return cli.generate() }, } - cli.root.PersistentFlags().StringP("dir", "d", "", "Output directory for manifests") + cli.root.PersistentFlags().StringVarP(&cli.opts.Directory, "dir", "d", "", "Output directory for manifests") _ = cli.root.MarkPersistentFlagRequired("dir") - cli.root.PersistentFlags().IntP("groups", "g", 0, "Number of groups") - cli.root.PersistentFlags().StringP("p2p", "p", string(MixedP2PMode), - "P2P typology to be generated [\"new\", \"legacy\", \"hybrid\" or \"mixed\" ]") + cli.root.Flags().BoolVarP(&cli.opts.Reverse, "reverse", "r", false, "Reverse sort order") + cli.root.PersistentFlags().IntVarP(&cli.opts.NumGroups, "groups", "g", 0, "Number of groups") + cli.root.PersistentFlags().IntVarP(&cli.opts.MinNetworkSize, "min-size", "", 1, + "Minimum network size (nodes)") + cli.root.PersistentFlags().IntVarP(&cli.opts.MaxNetworkSize, "max-size", "", 0, + "Maximum network size (nodes), 0 is unlimited") return cli } // generate generates manifests in a directory. -func (cli *CLI) generate(dir string, groups int, opts Options) error { - err := os.MkdirAll(dir, 0755) +func (cli *CLI) generate() error { + err := os.MkdirAll(cli.opts.Directory, 0755) if err != nil { return err } - manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), opts) + manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), cli.opts) if err != nil { return err } - if groups <= 0 { - for i, manifest := range manifests { - err = manifest.Save(filepath.Join(dir, fmt.Sprintf("gen-%04d.toml", i))) - if err != nil { + + switch { + case cli.opts.NumGroups <= 0: + e2e.SortManifests(manifests, cli.opts.Reverse) + + if err := e2e.WriteManifests(filepath.Join(cli.opts.Directory, "gen"), manifests); err != nil { + return err + } + default: + groupManifests := e2e.SplitGroups(cli.opts.NumGroups, manifests) + + for idx, gm := range groupManifests { + e2e.SortManifests(gm, cli.opts.Reverse) + + prefix := filepath.Join(cli.opts.Directory, fmt.Sprintf("gen-group%02d", idx)) + if err := e2e.WriteManifests(prefix, gm); err != nil { return err } } - } else { - groupSize := int(math.Ceil(float64(len(manifests)) / float64(groups))) - for g := 0; g < groups; g++ { - for i := 0; i < groupSize && g*groupSize+i < len(manifests); i++ { - manifest := manifests[g*groupSize+i] - err = manifest.Save(filepath.Join(dir, fmt.Sprintf("gen-group%02d-%04d.toml", g, i))) - if err != nil { - return err - } - } - } } + return nil } diff --git a/test/e2e/generator/random.go b/test/e2e/generator/random.go index f21502118..c00d56964 100644 --- a/test/e2e/generator/random.go +++ b/test/e2e/generator/random.go @@ -3,6 +3,8 @@ package main import ( "math/rand" "sort" + + "github.com/mroth/weightedrand" ) // combinations takes input in the form of a map of item lists, and returns a @@ -72,14 +74,36 @@ func (pc probSetChoice)
Choose(r *rand.Rand) []string { // uniformSetChoice picks a set of strings with uniform probability, picking at least one. type uniformSetChoice []string -func (usc uniformSetChoice) Choose(r *rand.Rand) []string { +func (usc uniformSetChoice) Choose(r *rand.Rand) []string { return usc.ChooseAtLeast(r, 1) } + +func (usc uniformSetChoice) ChooseAtLeast(r *rand.Rand, num int) []string { choices := []string{} indexes := r.Perm(len(usc)) - if len(indexes) > 1 { - indexes = indexes[:1+r.Intn(len(indexes)-1)] + if num < len(indexes) { + indexes = indexes[:1+randomInRange(r, num, len(indexes)-1)] } + for _, i := range indexes { choices = append(choices, usc[i]) } + return choices } + +func randomInRange(r *rand.Rand, min, max int) int { return r.Intn(max-min+1) + min } + +type weightedChoice map[string]uint + +func (wc weightedChoice) Choose(r *rand.Rand) string { + choices := make([]weightedrand.Choice, 0, len(wc)) + for k, v := range wc { + choices = append(choices, weightedrand.NewChoice(k, v)) + } + + chooser, err := weightedrand.NewChooser(choices...) + if err != nil { + panic(err) + } + + return chooser.PickSource(r).(string) +} diff --git a/test/e2e/generator/random_test.go b/test/e2e/generator/random_test.go index 3fbb19ab5..48b04f2d1 100644 --- a/test/e2e/generator/random_test.go +++ b/test/e2e/generator/random_test.go @@ -1,9 +1,12 @@ package main import ( + "fmt" + "math/rand" "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestCombinations(t *testing.T) { @@ -29,3 +32,28 @@ func TestCombinations(t *testing.T) { {"bool": true, "int": 3, "string": "bar"}, }, c) } + +func TestUniformSetChoice(t *testing.T) { + set := uniformSetChoice([]string{"a", "b", "c"}) + r := rand.New(rand.NewSource(2384)) + + for i := 0; i < 100; i++ { + t.Run(fmt.Sprintf("Iteration%03d", i), func(t *testing.T) { + set = append(set, t.Name()) + + t.Run("ChooseAtLeastSubset", func(t *testing.T) { + require.True(t, len(set.ChooseAtLeast(r, 1)) >= 1) + require.True(t, len(set.ChooseAtLeast(r, 2)) >= 2) + require.True(t, len(set.ChooseAtLeast(r, len(set)/2)) >= len(set)/2) + }) + t.Run("ChooseAtLeastEqualOrGreaterToLength", func(t *testing.T) { + require.Len(t, set.ChooseAtLeast(r, len(set)), len(set)) + require.Len(t, set.ChooseAtLeast(r, len(set)+1), len(set)) + require.Len(t, set.ChooseAtLeast(r, len(set)*10), len(set)) + }) + t.Run("ChooseSingle", func(t *testing.T) { + require.True(t, len(set.Choose(r)) >= 1) + }) + }) + } +} diff --git a/test/e2e/networks/ci.toml b/test/e2e/networks/ci.toml index 00c73ccbd..f73a18859 100644 --- a/test/e2e/networks/ci.toml +++ b/test/e2e/networks/ci.toml @@ -1,7 +1,6 @@ # This testnet is run by CI, and attempts to cover a broad range of # functionality with a single network. 
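Stepping back to the `weightedChoice` helper defined in `random.go` above: here is a rough sketch, not part of this change, of how it behaves, written in the same style as `random_test.go` (the seed and the 80/20 expectation are illustrative assumptions):

```go
package main

import (
	"math/rand"
	"testing"
)

// Sketch: sample the weightedChoice helper defined above and check that
// the picks land roughly in proportion to their relative weights.
func TestWeightedChoiceProportions(t *testing.T) {
	wc := weightedChoice{"v0": 20, "v1": 80} // weights are relative
	r := rand.New(rand.NewSource(2384))

	counts := map[string]int{}
	for i := 0; i < 1000; i++ {
		counts[wc.Choose(r)]++
	}
	// With an 80/20 split we expect v1 to clearly dominate.
	if counts["v1"] <= counts["v0"] {
		t.Fatalf("expected v1 to dominate: %v", counts)
	}
}
```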
-disable_legacy_p2p = false evidence = 5 initial_height = 1000 initial_state = {initial01 = "a", initial02 = "b", initial03 = "c"} @@ -35,6 +34,7 @@ perturb = ["restart"] perturb = ["disconnect"] seeds = ["seed01"] snapshot_interval = 5 +block_sync = "v0" [node.validator02] abci_protocol = "tcp" @@ -43,6 +43,7 @@ persist_interval = 0 perturb = ["restart"] privval_protocol = "tcp" seeds = ["seed01"] +block_sync = "v0" [node.validator03] database = "badgerdb" @@ -51,18 +52,22 @@ abci_protocol = "grpc" persist_interval = 3 perturb = ["kill"] privval_protocol = "grpc" -retain_blocks = 7 +block_sync = "v0" +retain_blocks = 10 [node.validator04] abci_protocol = "builtin" +snapshot_interval = 5 database = "rocksdb" persistent_peers = ["validator01"] perturb = ["pause"] +block_sync = "v0" [node.validator05] -database = "cleveldb" -block_sync = "v0" -seeds = ["seed01"] +database = "cleveldb" +block_sync = "v0" +state_sync = "p2p" +seeds = ["seed01"] start_at = 1005 # Becomes part of the validator set at 1010 abci_protocol = "grpc" perturb = ["pause", "disconnect", "restart"] @@ -71,12 +76,11 @@ privval_protocol = "tcp" [node.full01] mode = "full" start_at = 1010 -# FIXME: should be v2, disabled due to flake block_sync = "v0" -persistent_peers = ["validator01", "validator02", "validator03", "validator04", "validator05"] +persistent_peers = ["validator01", "validator02", "validator03", "validator04"] perturb = ["restart"] -retain_blocks = 7 +retain_blocks = 10 +state_sync = "rpc" [node.light01] mode = "light" diff --git a/test/e2e/node/built-in.toml b/test/e2e/node/built-in.toml new file mode 100644 index 000000000..0a2146a58 --- /dev/null +++ b/test/e2e/node/built-in.toml @@ -0,0 +1,4 @@ +snapshot_interval = 100 +persist_interval = 1 +chain_id = "test-chain" +protocol = "builtin" diff --git a/test/e2e/app/config.go b/test/e2e/node/config.go similarity index 80% rename from test/e2e/app/config.go rename to test/e2e/node/config.go index d7e776538..fa7dcc497 100644 --- a/test/e2e/app/config.go +++ b/test/e2e/node/config.go @@ -6,6 +6,8 @@ import ( "fmt" "github.com/BurntSushi/toml" + + "github.com/tendermint/tendermint/test/e2e/app" ) // Config is the application configuration. @@ -22,10 +24,21 @@ type Config struct { PrivValServer string `toml:"privval_server"` PrivValKey string `toml:"privval_key"` PrivValState string `toml:"privval_state"` - Misbehaviors map[string]string `toml:"misbehaviors"` KeyType string `toml:"key_type"` } +// App extracts the application-specific configuration parameters. +func (cfg *Config) App() *app.Config { + return &app.Config{ + Dir: cfg.Dir, + SnapshotInterval: cfg.SnapshotInterval, + RetainBlocks: cfg.RetainBlocks, + KeyType: cfg.KeyType, + ValidatorUpdates: cfg.ValidatorUpdates, + PersistInterval: cfg.PersistInterval, + } +} + // LoadConfig loads the configuration from disk.
func LoadConfig(file string) (*Config, error) { cfg := &Config{ diff --git a/test/e2e/app/main.go b/test/e2e/node/main.go similarity index 96% rename from test/e2e/app/main.go rename to test/e2e/node/main.go index fd464220d..b5d9debe9 100644 --- a/test/e2e/app/main.go +++ b/test/e2e/node/main.go @@ -14,6 +14,7 @@ import ( "github.com/spf13/viper" "google.golang.org/grpc" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" @@ -28,8 +29,8 @@ import ( "github.com/tendermint/tendermint/privval" grpcprivval "github.com/tendermint/tendermint/privval/grpc" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" - "github.com/tendermint/tendermint/proxy" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" + "github.com/tendermint/tendermint/test/e2e/app" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) @@ -78,7 +79,7 @@ func run(configFile string) error { case string(e2e.ModeLight): err = startLightNode(cfg) case string(e2e.ModeSeed): - err = startSeedNode(cfg) + err = startSeedNode() default: err = startNode(cfg) } @@ -97,7 +98,7 @@ func run(configFile string) error { // startApp starts the application server, listening for connections from Tendermint. func startApp(cfg *Config) error { - app, err := NewApplication(cfg) + app, err := app.NewApplication(cfg.App()) if err != nil { return err } @@ -118,7 +119,7 @@ func startApp(cfg *Config) error { // // FIXME There is no way to simply load the configuration from a file, so we need to pull in Viper. func startNode(cfg *Config) error { - app, err := NewApplication(cfg) + app, err := app.NewApplication(cfg.App()) if err != nil { return err } @@ -130,7 +131,7 @@ func startNode(cfg *Config) error { n, err := node.New(tmcfg, nodeLogger, - proxy.NewLocalClientCreator(app), + abciclient.NewLocalCreator(app), nil, ) if err != nil { @@ -139,7 +140,7 @@ func startNode(cfg *Config) error { return n.Start() } -func startSeedNode(cfg *Config) error { +func startSeedNode() error { tmcfg, nodeLogger, err := setupNode() if err != nil { return fmt.Errorf("failed to setup config: %w", err) diff --git a/test/e2e/node/socket.toml b/test/e2e/node/socket.toml new file mode 100644 index 000000000..2f7913e62 --- /dev/null +++ b/test/e2e/node/socket.toml @@ -0,0 +1,5 @@ +snapshot_interval = 100 +persist_interval = 1 +chain_id = "test-chain" +protocol = "socket" +listen = "tcp://127.0.0.1:26658" diff --git a/test/e2e/pkg/manifest.go b/test/e2e/pkg/manifest.go index 5711be37d..16b99cfda 100644 --- a/test/e2e/pkg/manifest.go +++ b/test/e2e/pkg/manifest.go @@ -3,6 +3,7 @@ package e2e import ( "fmt" "os" + "sort" "github.com/BurntSushi/toml" ) @@ -59,14 +60,17 @@ type Manifest struct { // by individual nodes. LogLevel string `toml:"log_level"` - // DisableLegacyP2P enables use of the new p2p layer for all nodes in a test. - DisableLegacyP2P bool `toml:"disable_legacy_p2p"` - // QueueType describes the type of queue that the system uses internally QueueType string `toml:"queue_type"` // Number of bytes per tx. Default is 1kb (1024) TxSize int64 + + // ABCIProtocol specifies the protocol used to communicate with the ABCI + // application: "unix", "tcp", "grpc", or "builtin". Defaults to builtin. + // builtin will build a complete Tendermint node into the application and + // launch it instead of launching a separate Tendermint process. 
+ ABCIProtocol string `toml:"abci_protocol"` } // ManifestNode represents a node in a testnet manifest. @@ -89,12 +93,6 @@ type ManifestNode struct { // "rocksdb", "boltdb", or "badgerdb". Defaults to goleveldb. Database string `toml:"database"` - // ABCIProtocol specifies the protocol used to communicate with the ABCI - // application: "unix", "tcp", "grpc", or "builtin". Defaults to unix. - // builtin will build a complete Tendermint node into the application and - // launch it instead of launching a separate Tendermint process. - ABCIProtocol string `toml:"abci_protocol"` - // PrivvalProtocol specifies the protocol used to sign consensus messages: // "file", "unix", "tcp", or "grpc". Defaults to "file". For tcp and unix, the ABCI // application will launch a remote signer client in a separate goroutine. @@ -117,7 +115,8 @@ type ManifestNode struct { // block hashes and RPC servers. At least one node in the network must have // SnapshotInterval set to non-zero, and the state syncing node must have // StartAt set to an appropriate height where a snapshot is available. - StateSync bool `toml:"state_sync"` + // StateSync can be either "p2p" or "rpc", or an empty string to disable it. + StateSync string `toml:"state_sync"` // PersistInterval specifies the height interval at which the application // will persist state to disk. Defaults to 1 (every height), setting this to @@ -146,9 +145,11 @@ type ManifestNode struct { // This is helpful when debugging a specific problem. This overrides the network // level. LogLevel string `toml:"log_level"` +} - // UseNewP2P enables use of the new p2p layer for this node. - DisableLegacyP2P bool `toml:"disable_legacy_p2p"` +// Stateless reports whether m is a node that does not own state, including light and seed nodes. +func (m ManifestNode) Stateless() bool { + return m.Mode == string(ModeLight) || m.Mode == string(ModeSeed) } // Save saves the testnet manifest to a file. @@ -169,3 +170,93 @@ func LoadManifest(file string) (Manifest, error) { } return manifest, nil } + +// SortManifests orders (in-place) a list of manifests such that the +// manifests will be ordered in terms of complexity (or expected +// runtime). Complexity is determined first by the number of nodes, +// and then by the total number of perturbations in the network. +// +// If reverse is true, then the manifests are ordered with the most +// complex networks before the less complex networks. +func SortManifests(manifests []Manifest, reverse bool) { + sort.SliceStable(manifests, func(i, j int) bool { + // sort based on a point-based comparison between two + // manifests. + var ( + left = manifests[i] + right = manifests[j] + ) + + // scores start with 100 points for each node. The + // number of nodes in a network is the most important + // factor in the complexity of the test. + leftScore := len(left.Nodes) * 100 + rightScore := len(right.Nodes) * 100 + + // add two points for every node perturbation, and three + // points for every node that starts after genesis. + for _, n := range left.Nodes { + leftScore += (len(n.Perturb) * 2) + + if n.StartAt > 0 { + leftScore += 3 + } + } + for _, n := range right.Nodes { + rightScore += (len(n.Perturb) * 2) + if n.StartAt > 0 { + rightScore += 3 + } + } + + // add two points if the network has evidence.
+ if left.Evidence > 0 { + leftScore += 2 + } + + if right.Evidence > 0 { + rightScore += 2 + } + + if left.TxSize > right.TxSize { + leftScore++ + } + + if right.TxSize > left.TxSize { + rightScore++ + } + + if reverse { + return leftScore >= rightScore + } + + return leftScore < rightScore + }) +} + +// SplitGroups divides a list of manifests into n groups of +// manifests. +func SplitGroups(groups int, manifests []Manifest) [][]Manifest { + groupSize := (len(manifests) + groups - 1) / groups + splitManifests := make([][]Manifest, 0, groups) + + for i := 0; i < len(manifests); i += groupSize { + grp := make([]Manifest, groupSize) + n := copy(grp, manifests[i:]) + splitManifests = append(splitManifests, grp[:n]) + } + + return splitManifests +} + +// WriteManifests writes a collection of manifests into files with the +// specified path prefix. +func WriteManifests(prefix string, manifests []Manifest) error { + for i, manifest := range manifests { + if err := manifest.Save(fmt.Sprintf("%s-%04d.toml", prefix, i)); err != nil { + return err + } + } + + return nil +} diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index cfeb54bde..d0770ce8d 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -50,6 +50,10 @@ const ( EvidenceAgeHeight int64 = 7 EvidenceAgeTime time.Duration = 500 * time.Millisecond + + StateSyncP2P = "p2p" + StateSyncRPC = "rpc" + StateSyncDisabled = "" ) // Testnet represents a single testnet. @@ -67,6 +71,7 @@ type Testnet struct { Evidence int LogLevel string TxSize int64 + ABCIProtocol string } // Node represents a Tendermint node in a testnet. @@ -81,7 +86,7 @@ type Node struct { StartAt int64 BlockSync string Mempool string - StateSync bool + StateSync string Database string ABCIProtocol Protocol PrivvalProtocol Protocol @@ -92,8 +97,8 @@ type Node struct { PersistentPeers []*Node Perturbations []Perturbation LogLevel string - DisableLegacyP2P bool QueueType string + HasStarted bool } // LoadTestnet loads a testnet from a manifest file, using the filename to @@ -136,6 +141,7 @@ func LoadTestnet(file string) (*Testnet, error) { KeyType: "ed25519", LogLevel: manifest.LogLevel, TxSize: manifest.TxSize, + ABCIProtocol: manifest.ABCIProtocol, } if len(manifest.KeyType) != 0 { testnet.KeyType = manifest.KeyType @@ -146,6 +152,9 @@ func LoadTestnet(file string) (*Testnet, error) { if manifest.InitialHeight > 0 { testnet.InitialHeight = manifest.InitialHeight } + if testnet.ABCIProtocol == "" { + testnet.ABCIProtocol = string(ProtocolBuiltin) + } // Set up nodes, in alphabetical order (IPs and ports get same order). 
nodeNames := []string{} @@ -165,10 +174,10 @@ func LoadTestnet(file string) (*Testnet, error) { ProxyPort: proxyPortGen.Next(), Mode: ModeValidator, Database: "goleveldb", - ABCIProtocol: ProtocolBuiltin, + ABCIProtocol: Protocol(testnet.ABCIProtocol), PrivvalProtocol: ProtocolFile, StartAt: nodeManifest.StartAt, - BlockSync: nodeManifest.BlockSync, + BlockSync: "v0", Mempool: nodeManifest.Mempool, StateSync: nodeManifest.StateSync, PersistInterval: 1, @@ -177,21 +186,19 @@ func LoadTestnet(file string) (*Testnet, error) { Perturbations: []Perturbation{}, LogLevel: manifest.LogLevel, QueueType: manifest.QueueType, - DisableLegacyP2P: manifest.DisableLegacyP2P || nodeManifest.DisableLegacyP2P, } - if node.StartAt == testnet.InitialHeight { node.StartAt = 0 // normalize to 0 for initial nodes, since code expects this } if nodeManifest.Mode != "" { node.Mode = Mode(nodeManifest.Mode) } + if node.Mode == ModeLight { + node.ABCIProtocol = ProtocolBuiltin + } if nodeManifest.Database != "" { node.Database = nodeManifest.Database } - if nodeManifest.ABCIProtocol != "" { - node.ABCIProtocol = Protocol(nodeManifest.ABCIProtocol) - } if nodeManifest.PrivvalProtocol != "" { node.PrivvalProtocol = Protocol(nodeManifest.PrivvalProtocol) } @@ -333,13 +340,18 @@ func (n Node) Validate(testnet Testnet) error { default: return fmt.Errorf("invalid block sync setting %q", n.BlockSync) } + switch n.StateSync { + case StateSyncDisabled, StateSyncP2P, StateSyncRPC: + default: + return fmt.Errorf("invalid state sync setting %q", n.StateSync) + } switch n.Mempool { case "", "v0", "v1": default: return fmt.Errorf("invalid mempool version %q", n.Mempool) } switch n.QueueType { - case "", "priority", "wdrr", "fifo": + case "", "priority", "fifo": default: return fmt.Errorf("unsupported p2p queue type: %s", n.QueueType) } @@ -366,7 +378,7 @@ func (n Node) Validate(testnet Testnet) error { return fmt.Errorf("cannot start at height %v lower than initial height %v", n.StartAt, n.Testnet.InitialHeight) } - if n.StateSync && n.StartAt == 0 { + if n.StateSync != StateSyncDisabled && n.StartAt == 0 { return errors.New("state synced nodes cannot start at the initial height") } if n.RetainBlocks != 0 && n.RetainBlocks < uint64(EvidenceAgeHeight) { @@ -417,16 +429,6 @@ func (t Testnet) ArchiveNodes() []*Node { return nodes } -// RandomNode returns a random non-seed node. -func (t Testnet) RandomNode() *Node { - for { - node := t.Nodes[rand.Intn(len(t.Nodes))] - if node.Mode != ModeSeed { - return node - } - } -} - // IPv6 returns true if the testnet is an IPv6 network. func (t Testnet) IPv6() bool { return t.IP.IP.To4() == nil diff --git a/test/e2e/run-multiple.sh b/test/e2e/run-multiple.sh index 5d6a20ef9..571a78a7f 100755 --- a/test/e2e/run-multiple.sh +++ b/test/e2e/run-multiple.sh @@ -19,7 +19,7 @@ FAILED=() for MANIFEST in "$@"; do START=$SECONDS - echo "==> Running testnet $MANIFEST..." + echo "==> Running testnet: $MANIFEST" if ! ./build/runner -f "$MANIFEST"; then echo "==> Testnet $MANIFEST failed, dumping manifest..." 
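Putting the manifest helpers together: the following is a minimal sketch, not part of this change, of the sort-split-write flow the generator CLI performs using `Generate`, `SplitGroups`, `SortManifests`, and `WriteManifests` as shown above (the function name and directory handling are hypothetical):

```go
package main

import (
	"fmt"
	"math/rand"
	"path/filepath"

	e2e "github.com/tendermint/tendermint/test/e2e/pkg"
)

// Sketch: generate manifests, split them into groups, order each group
// by complexity, and write each group to disk with a numbered prefix.
func writeGroupedManifests(dir string, groups int) error {
	manifests, err := Generate(rand.New(rand.NewSource(randomSeed)), Options{})
	if err != nil {
		return err
	}
	for idx, gm := range e2e.SplitGroups(groups, manifests) {
		e2e.SortManifests(gm, false /* ascending */)

		prefix := filepath.Join(dir, fmt.Sprintf("gen-group%02d", idx))
		if err := e2e.WriteManifests(prefix, gm); err != nil {
			return err
		}
	}
	return nil
}
```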
diff --git a/test/e2e/runner/benchmark.go b/test/e2e/runner/benchmark.go index 74d2491f5..50a2c33f9 100644 --- a/test/e2e/runner/benchmark.go +++ b/test/e2e/runner/benchmark.go @@ -21,8 +21,8 @@ import ( // // Metrics are based on the `benchmarkLength`, the number of consecutive blocks // sampled from the testnet -func Benchmark(testnet *e2e.Testnet, benchmarkLength int64) error { - block, _, err := waitForHeight(testnet, 0) +func Benchmark(ctx context.Context, testnet *e2e.Testnet, benchmarkLength int64) error { + block, err := getLatestBlock(ctx, testnet) if err != nil { return err } @@ -32,13 +32,15 @@ func Benchmark(testnet *e2e.Testnet, benchmarkLength int64) error { // wait for the length of the benchmark period in blocks to pass. We allow 5 seconds for each block // which should be sufficient. waitingTime := time.Duration(benchmarkLength*5) * time.Second - endHeight, err := waitForAllNodes(testnet, block.Height+benchmarkLength, waitingTime) + ctx, cancel := context.WithTimeout(ctx, waitingTime) + defer cancel() + block, _, err = waitForHeight(ctx, testnet, block.Height+benchmarkLength) if err != nil { return err } dur := time.Since(startAt) - logger.Info("Ending benchmark period", "height", endHeight) + logger.Info("Ending benchmark period", "height", block.Height) // fetch a sample of blocks blocks, err := fetchBlockChainSample(testnet, benchmarkLength) diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 6a246dcb5..25c4a1cc4 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -3,6 +3,7 @@ package main import ( "bytes" "context" + "errors" "fmt" "io/ioutil" "math/rand" @@ -27,9 +28,24 @@ const lightClientEvidenceRatio = 4 // evidence and broadcasts it to a random node through the rpc endpoint `/broadcast_evidence`. // Evidence is random and can be a mixture of LightClientAttackEvidence and // DuplicateVoteEvidence.
-func InjectEvidence(testnet *e2e.Testnet, amount int) error { +func InjectEvidence(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet, amount int) error { // select a random node - targetNode := testnet.RandomNode() + var targetNode *e2e.Node + + for _, idx := range r.Perm(len(testnet.Nodes)) { + targetNode = testnet.Nodes[idx] + + if targetNode.Mode == e2e.ModeSeed || targetNode.Mode == e2e.ModeLight { + targetNode = nil + continue + } + + break + } + + if targetNode == nil { + return errors.New("could not find node to inject evidence into") + } logger.Info(fmt.Sprintf("Injecting evidence through %v (amount: %d)...", targetNode.Name, amount)) @@ -63,9 +79,12 @@ func InjectEvidence(testnet *e2e.Testnet, amount int) error { return err } + wctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + // wait for the node to reach the height above the forged height so that // it is able to validate the evidence - _, err = waitForNode(targetNode, waitHeight, 30*time.Second) + _, err = waitForNode(wctx, targetNode, waitHeight) if err != nil { return err } @@ -91,9 +110,12 @@ func InjectEvidence(testnet *e2e.Testnet, amount int) error { } } + wctx, cancel = context.WithTimeout(ctx, 30*time.Second) + defer cancel() + // wait for the node to reach the height above the forged height so that // it is able to validate the evidence - _, err = waitForNode(targetNode, blockRes.Block.Height+2, 10*time.Second) + _, err = waitForNode(wctx, targetNode, blockRes.Block.Height+2) if err != nil { return err } @@ -181,10 +203,10 @@ func generateDuplicateVoteEvidence( chainID string, time time.Time, ) (*types.DuplicateVoteEvidence, error) { - // nolint:gosec // G404: Use of weak random number generator - privVal := privVals[rand.Intn(len(privVals))] - - valIdx, _ := vals.GetByAddress(privVal.PrivKey.PubKey().Address()) + privVal, valIdx, err := getRandomValidatorIndex(privVals, vals) + if err != nil { + return nil, err + } voteA, err := factory.MakeVote(privVal, chainID, valIdx, height, 0, 2, makeRandomBlockID(), time) if err != nil { return nil, err @@ -193,14 +215,27 @@ func generateDuplicateVoteEvidence( if err != nil { return nil, err } - ev := types.NewDuplicateVoteEvidence(voteA, voteB, time, vals) - if ev == nil { - return nil, fmt.Errorf("could not generate evidence a=%v b=%v vals=%v", voteA, voteB, vals) + ev, err := types.NewDuplicateVoteEvidence(voteA, voteB, time, vals) + if err != nil { + return nil, fmt.Errorf("could not generate evidence: %w", err) } return ev, nil } +// getRandomValidatorIndex picks a random validator from a slice of mock PrivVals that's +// also part of the validator set, returning the PrivVal and its index in the validator set +func getRandomValidatorIndex(privVals []types.MockPV, vals *types.ValidatorSet) (types.MockPV, int32, error) { + for _, idx := range rand.Perm(len(privVals)) { + pv := privVals[idx] + valIdx, _ := vals.GetByAddress(pv.PrivKey.PubKey().Address()) + if valIdx >= 0 { + return pv, valIdx, nil + } + } + return types.MockPV{}, -1, errors.New("no private validator found in validator set") +} + func readPrivKey(keyFilePath string) (crypto.PrivKey, error) { keyJSONBytes, err := ioutil.ReadFile(keyFilePath) if err != nil { diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 518e32564..f31b436dd 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -1,11 +1,10 @@ package main import ( + "container/ring" "context" - "crypto/rand" - "errors" "fmt" - "math" + "math/rand" "time" rpchttp 
"github.com/tendermint/tendermint/rpc/client/http" @@ -14,113 +13,192 @@ import ( ) // Load generates transactions against the network until the given context is -// canceled. A multiplier of greater than one can be supplied if load needs to -// be generated beyond a minimum amount. -func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error { +// canceled. +func Load(ctx context.Context, r *rand.Rand, testnet *e2e.Testnet) error { // Since transactions are executed across all nodes in the network, we need // to reduce transaction load for larger networks to avoid using too much // CPU. This gives high-throughput small networks and low-throughput large ones. // This also limits the number of TCP connections, since each worker has // a connection to all nodes. - concurrency := 64 / len(testnet.Nodes) - if concurrency == 0 { - concurrency = 1 + concurrency := len(testnet.Nodes) * 2 + if concurrency > 32 { + concurrency = 32 } - initialTimeout := 1 * time.Minute - stallTimeout := 30 * time.Second chTx := make(chan types.Tx) - chSuccess := make(chan types.Tx) + chSuccess := make(chan int) // success counts per iteration ctx, cancel := context.WithCancel(ctx) defer cancel() // Spawn job generator and processors. - logger.Info(fmt.Sprintf("Starting transaction load (%v workers)...", concurrency)) + logger.Info("starting transaction load", + "workers", concurrency, + "nodes", len(testnet.Nodes), + "tx", testnet.TxSize) + started := time.Now() - go loadGenerate(ctx, chTx, multiplier, testnet.TxSize) + go loadGenerate(ctx, r, chTx, testnet.TxSize, len(testnet.Nodes)) for w := 0; w < concurrency; w++ { go loadProcess(ctx, testnet, chTx, chSuccess) } - // Monitor successful transactions, and abort on stalls. + // Montior transaction to ensure load propagates to the network + // + // This loop doesn't check or time out for stalls, since a stall here just + // aborts the load generator sooner and could obscure backpressure + // from the test harness, and there are other checks for + // stalls in the framework. Ideally we should monitor latency as a guide + // for when to give up, but we don't have a good way to track that yet. success := 0 - timeout := initialTimeout for { select { - case <-chSuccess: - success++ - timeout = stallTimeout - case <-time.After(timeout): - return fmt.Errorf("unable to submit transactions for %v", timeout) + case numSeen := <-chSuccess: + success += numSeen case <-ctx.Done(): if success == 0 { - return errors.New("failed to submit any transactions") + return fmt.Errorf("failed to submit transactions in %s by %d workers", + time.Since(started), concurrency) } - logger.Info(fmt.Sprintf("Ending transaction load after %v txs (%.1f tx/s)...", - success, float64(success)/time.Since(started).Seconds())) + + // TODO perhaps allow test networks to + // declare required transaction rates, which + // might allow us to avoid the special case + // around 0 txs above. + rate := float64(success) / time.Since(started).Seconds() + + logger.Info("ending transaction load", + "dur_secs", time.Since(started).Seconds(), + "txns", success, + "workers", concurrency, + "rate", rate) + return nil } } } -// loadGenerate generates jobs until the context is canceled -func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int, size int64) { - for i := 0; i < math.MaxInt64; i++ { +// loadGenerate generates jobs until the context is canceled. 
+// +// The chTx has multiple consumers, thus the rate limiting of the load +// generation is primarily the result of backpressure from broadcasting +// transactions, though there is still some timer-based +// limiting. +func loadGenerate(ctx context.Context, r *rand.Rand, chTx chan<- types.Tx, txSize int64, networkSize int) { + timer := time.NewTimer(0) + defer timer.Stop() + defer close(chTx) + + for { + select { + case <-ctx.Done(): + return + case <-timer.C: + } + // We keep generating the same 100 keys over and over, with different values. // This gives a reasonable load without putting too much data in the app. - id := i % 100 + id := r.Int63() % 100 // nolint: gosec - bz := make([]byte, size) - _, err := rand.Read(bz) + bz := make([]byte, txSize) + _, err := r.Read(bz) if err != nil { panic(fmt.Sprintf("Failed to read random bytes: %v", err)) } tx := types.Tx(fmt.Sprintf("load-%X=%x", id, bz)) select { - case chTx <- tx: - sqrtSize := int(math.Sqrt(float64(size))) - time.Sleep(10 * time.Millisecond * time.Duration(sqrtSize/multiplier)) - case <-ctx.Done(): - close(chTx) return + case chTx <- tx: + // sleep for a bit before sending the + // next transaction. + timer.Reset(loadGenerateWaitTime(r, networkSize)) } + } } +func loadGenerateWaitTime(r *rand.Rand, size int) time.Duration { + const ( + min = int64(250 * time.Millisecond) + max = int64(time.Second) + ) + + var ( + baseJitter = r.Int63n(max-min+1) + min + sizeFactor = int64(size) * min + sizeJitter = r.Int63n(sizeFactor-min+1) + min + ) + + return time.Duration(baseJitter + sizeJitter) +} + // loadProcess processes transactions -func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- types.Tx) { - // Each worker gets its own client to each node, which allows for some - // concurrency while still bounding it. - clients := map[string]*rpchttp.HTTP{} +func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- int) { + // Each worker gets its own client to each usable node, which + // allows for some concurrency while still bounding it. + clients := make([]*rpchttp.HTTP, 0, len(testnet.Nodes)) - var err error - for tx := range chTx { - node := testnet.RandomNode() - - client, ok := clients[node.Name] - if !ok { - client, err = node.Client() - if err != nil { - continue - } - - // check that the node is up - _, err = client.Health(ctx) - if err != nil { - continue - } - - clients[node.Name] = client - } - - if _, err = client.BroadcastTxSync(ctx, tx); err != nil { + for idx := range testnet.Nodes { + // Construct a list of usable nodes for creating + // load. Don't send load through seed nodes because + // they do not provide the RPC endpoints required to + // broadcast transactions. + if testnet.Nodes[idx].Mode == e2e.ModeSeed { continue } - chSuccess <- tx + client, err := testnet.Nodes[idx].Client() + if err != nil { + continue + } + + clients = append(clients, client) + } + + if len(clients) == 0 { + panic("no clients to process load") + } + + // Put the clients in a ring so they can be used in a + // round-robin fashion.
+ clientRing := ring.New(len(clients)) + for idx := range clients { + clientRing.Value = clients[idx] + clientRing = clientRing.Next() + } + + successes := 0 + for { + select { + case <-ctx.Done(): + return + case tx := <-chTx: + clientRing = clientRing.Next() + client := clientRing.Value.(*rpchttp.HTTP) + + if status, err := client.Status(ctx); err != nil { + continue + } else if status.SyncInfo.CatchingUp { + continue + } + + if _, err := client.BroadcastTxSync(ctx, tx); err != nil { + continue + } + successes++ + + select { + case chSuccess <- successes: + successes = 0 // reset counter for the next iteration + continue + case <-ctx.Done(): + return + default: + } + + } } } diff --git a/test/e2e/runner/main.go b/test/e2e/runner/main.go index cb3d7d6bc..fb6ce4a8c 100644 --- a/test/e2e/runner/main.go +++ b/test/e2e/runner/main.go @@ -3,8 +3,10 @@ package main import ( "context" "fmt" + "math/rand" "os" "strconv" + "time" "github.com/spf13/cobra" @@ -12,9 +14,9 @@ import ( e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) -var ( - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) -) +const randomSeed = 2308084734268 + +var logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) func main() { NewCLI().Run() @@ -48,63 +50,86 @@ func NewCLI() *CLI { cli.testnet = testnet return nil }, - RunE: func(cmd *cobra.Command, args []string) error { - if err := Cleanup(cli.testnet); err != nil { + RunE: func(cmd *cobra.Command, args []string) (err error) { + if err = Cleanup(cli.testnet); err != nil { return err } - if err := Setup(cli.testnet); err != nil { + defer func() { + if cli.preserve { + logger.Info("Preserving testnet contents because -preserve=true") + } else if err != nil { + logger.Info("Preserving testnet that encountered error", + "err", err) + } else if err := Cleanup(cli.testnet); err != nil { + logger.Error("Error cleaning up testnet contents", "err", err) + } + }() + if err = Setup(cli.testnet); err != nil { return err } + r := rand.New(rand.NewSource(randomSeed)) // nolint: gosec + chLoadResult := make(chan error) - ctx, loadCancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() go func() { - err := Load(ctx, cli.testnet, 1) - chLoadResult <- err + chLoadResult <- Load(lctx, r, cli.testnet) }() - - if err := Start(cli.testnet); err != nil { + startAt := time.Now() + if err = Start(ctx, cli.testnet); err != nil { return err } - if err := Wait(cli.testnet, 5); err != nil { // allow some txs to go through + if err = Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through return err } if cli.testnet.HasPerturbations() { - if err := Perturb(cli.testnet); err != nil { + if err = Perturb(ctx, cli.testnet); err != nil { return err } - if err := Wait(cli.testnet, 5); err != nil { // allow some txs to go through + if err = Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through return err } } if cli.testnet.Evidence > 0 { - if err := InjectEvidence(cli.testnet, cli.testnet.Evidence); err != nil { + if err = InjectEvidence(ctx, r, cli.testnet, cli.testnet.Evidence); err != nil { return err } - if err := Wait(cli.testnet, 5); err != nil { // ensure chain progress + if err = Wait(ctx, cli.testnet, 5); err != nil { // ensure chain progress return err } } + // to help make sure that we don't run into + // situations where 0 transactions have + // happened on quick cases, we make 
sure that + // it's been at least 15s before canceling the + // load generator. + // + // TODO allow the load generator to report + // successful transactions to avoid needing + // this sleep. + if rest := time.Since(startAt); rest < 15*time.Second { + time.Sleep(15*time.Second - rest) + } + loadCancel() - if err := <-chLoadResult; err != nil { + + if err = <-chLoadResult; err != nil { return fmt.Errorf("transaction load failed: %w", err) } - if err := Wait(cli.testnet, 5); err != nil { // wait for network to settle before tests + if err = Wait(ctx, cli.testnet, 5); err != nil { // wait for network to settle before tests return err } if err := Test(cli.testnet); err != nil { return err } - if !cli.preserve { - if err := Cleanup(cli.testnet); err != nil { - return err - } - } return nil }, } @@ -139,7 +164,7 @@ func NewCLI() *CLI { if err != nil { return err } - return Start(cli.testnet) + return Start(cmd.Context(), cli.testnet) }, }) @@ -147,7 +172,7 @@ func NewCLI() *CLI { Use: "perturb", Short: "Perturbs the Docker testnet, e.g. by restarting or disconnecting nodes", RunE: func(cmd *cobra.Command, args []string) error { - return Perturb(cli.testnet) + return Perturb(cmd.Context(), cli.testnet) }, }) @@ -155,7 +180,7 @@ func NewCLI() *CLI { Use: "wait", Short: "Waits for a few blocks to be produced and all nodes to catch up", RunE: func(cmd *cobra.Command, args []string) error { - return Wait(cli.testnet, 5) + return Wait(cmd.Context(), cli.testnet, 5) }, }) @@ -169,29 +194,32 @@ func NewCLI() *CLI { }) cli.root.AddCommand(&cobra.Command{ - Use: "resume", - Short: "Resumes the Docker testnet", + Use: "pause", + Short: "Pauses the Docker testnet", RunE: func(cmd *cobra.Command, args []string) error { - logger.Info("Resuming testnet") - return execCompose(cli.testnet.Dir, "up") + logger.Info("Pausing testnet") + return execCompose(cli.testnet.Dir, "pause") }, }) cli.root.AddCommand(&cobra.Command{ - Use: "load [multiplier]", - Args: cobra.MaximumNArgs(1), + Use: "resume", + Short: "Resumes the Docker testnet", + RunE: func(cmd *cobra.Command, args []string) error { + logger.Info("Resuming testnet") + return execCompose(cli.testnet.Dir, "unpause") + }, + }) + + cli.root.AddCommand(&cobra.Command{ + Use: "load", Short: "Generates transaction load until the command is canceled", RunE: func(cmd *cobra.Command, args []string) (err error) { - m := 1 - - if len(args) == 1 { - m, err = strconv.Atoi(args[0]) - if err != nil { - return err - } - } - - return Load(context.Background(), cli.testnet, m) + return Load( + cmd.Context(), + rand.New(rand.NewSource(randomSeed)), // nolint: gosec + cli.testnet, + ) }, }) @@ -209,7 +237,12 @@ func NewCLI() *CLI { } } - return InjectEvidence(cli.testnet, amount) + return InjectEvidence( + cmd.Context(), + rand.New(rand.NewSource(randomSeed)), // nolint: gosec + cli.testnet, + amount, + ) }, }) @@ -267,28 +300,39 @@ Does not run any perturbations.
if err := Cleanup(cli.testnet); err != nil { return err } + defer func() { + if err := Cleanup(cli.testnet); err != nil { + logger.Error("Error cleaning up testnet contents", "err", err) + } + }() + if err := Setup(cli.testnet); err != nil { return err } chLoadResult := make(chan error) - ctx, loadCancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(cmd.Context()) + defer cancel() + + r := rand.New(rand.NewSource(randomSeed)) // nolint: gosec + + lctx, loadCancel := context.WithCancel(ctx) defer loadCancel() go func() { - err := Load(ctx, cli.testnet, 1) + err := Load(lctx, r, cli.testnet) chLoadResult <- err }() - if err := Start(cli.testnet); err != nil { + if err := Start(ctx, cli.testnet); err != nil { return err } - if err := Wait(cli.testnet, 5); err != nil { // allow some txs to go through + if err := Wait(ctx, cli.testnet, 5); err != nil { // allow some txs to go through return err } // we benchmark performance over the next 100 blocks - if err := Benchmark(cli.testnet, 100); err != nil { + if err := Benchmark(ctx, cli.testnet, 100); err != nil { return err } @@ -297,10 +341,6 @@ Does not run any perturbations. return err } - if err := Cleanup(cli.testnet); err != nil { - return err - } - return nil }, }) diff --git a/test/e2e/runner/perturb.go b/test/e2e/runner/perturb.go index 6909c665a..ccb3f6c51 100644 --- a/test/e2e/runner/perturb.go +++ b/test/e2e/runner/perturb.go @@ -1,22 +1,33 @@ package main import ( + "context" "fmt" "time" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) // Perturbs a running testnet. -func Perturb(testnet *e2e.Testnet) error { +func Perturb(ctx context.Context, testnet *e2e.Testnet) error { + timer := time.NewTimer(0) // first tick fires immediately; reset below + defer timer.Stop() + for _, node := range testnet.Nodes { for _, perturbation := range node.Perturbations { - _, err := PerturbNode(node, perturbation) - if err != nil { - return err + select { + case <-ctx.Done(): + return ctx.Err() + case <-timer.C: + _, err := PerturbNode(ctx, node, perturbation) + if err != nil { + return err + } + + // give network some time to recover between each + timer.Reset(20 * time.Second) } - time.Sleep(20 * time.Second) // give network some time to recover between each } } return nil @@ -24,7 +35,7 @@ func Perturb(testnet *e2e.Testnet) error { // PerturbNode perturbs a node with a given perturbation, returning its status // after recovering.
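The rewritten Perturb above swaps a bare time.Sleep for a timer-and-select loop, so the pause between perturbations stays cancelable. A minimal, self-contained sketch of that pattern, with a hypothetical list of step names standing in for the PerturbNode calls:

package main

import (
	"context"
	"fmt"
	"time"
)

// paced runs one step per timer tick. A zero-duration timer makes the
// first step fire immediately; each Reset spaces out the remaining ones.
func paced(ctx context.Context, steps []string, gap time.Duration) error {
	timer := time.NewTimer(0)
	defer timer.Stop()
	for _, s := range steps {
		select {
		case <-ctx.Done():
			return ctx.Err() // canceled between steps, never stuck mid-sleep
		case <-timer.C:
			fmt.Println("perturbing:", s) // stand-in for PerturbNode
			timer.Reset(gap)              // give the network time to recover
		}
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := paced(ctx, []string{"disconnect", "restart"}, 200*time.Millisecond); err != nil {
		fmt.Println("stopped early:", err)
	}
}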
-func PerturbNode(node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { +func PerturbNode(ctx context.Context, node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.ResultStatus, error) { testnet := node.Testnet switch perturbation { case e2e.PerturbationDisconnect: @@ -59,7 +70,11 @@ func PerturbNode(node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.Resul case e2e.PerturbationRestart: logger.Info(fmt.Sprintf("Restarting node %v...", node.Name)) - if err := execCompose(testnet.Dir, "restart", node.Name); err != nil { + if err := execCompose(testnet.Dir, "kill", "-s", "SIGTERM", node.Name); err != nil { + return nil, err + } + time.Sleep(10 * time.Second) + if err := execCompose(testnet.Dir, "start", node.Name); err != nil { return nil, err } @@ -73,7 +88,9 @@ func PerturbNode(node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.Resul return nil, nil } - status, err := waitForNode(node, 0, 3*time.Minute) + ctx, cancel := context.WithTimeout(ctx, 5*time.Minute) + defer cancel() + status, err := waitForNode(ctx, node, 0) if err != nil { return nil, err } diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index 52c009caa..ad5fa7a64 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -7,66 +7,129 @@ import ( "time" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) // waitForHeight waits for the network to reach a certain height (or above), -// returning the highest height seen. Errors if the network is not making +// returning the block at the height seen. Errors if the network is not making // progress at all. -func waitForHeight(testnet *e2e.Testnet, height int64) (*types.Block, *types.BlockID, error) { +// If height == 0, the initial height of the test network is used as the target. 
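PerturbNode above also stops threading a timeout value down the call chain: it derives a bounded context at the call site (five minutes around waitForNode) and cancels it on every path. A minimal sketch of that call-site deadline pattern, assuming a hypothetical waitReady helper in place of waitForNode:

package main

import (
	"context"
	"fmt"
	"time"
)

// waitReady blocks until the context expires or the (simulated) node
// comes back up; the real waitForNode polls Status in the same shape.
func waitReady(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(50 * time.Millisecond): // pretend the node recovered
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel() // releases the deadline's timer on the success path too
	fmt.Println("wait result:", waitReady(ctx))
}

waitForHeight, whose definition follows, adopts the same context-first signature.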
+func waitForHeight(ctx context.Context, testnet *e2e.Testnet, height int64) (*types.Block, *types.BlockID, error) { var ( - err error - maxResult *rpctypes.ResultBlock - clients = map[string]*rpchttp.HTTP{} - lastIncrease = time.Now() + err error + clients = map[string]*rpchttp.HTTP{} + lastHeight int64 + lastIncrease = time.Now() + nodesAtHeight = map[string]struct{}{} + numRunningNodes int ) + if height == 0 { + height = testnet.InitialHeight + } + for _, node := range testnet.Nodes { + if node.Stateless() { + continue + } + + if node.HasStarted { + numRunningNodes++ + } + } + + timer := time.NewTimer(0) + defer timer.Stop() for { - for _, node := range testnet.Nodes { - if node.Mode == e2e.ModeSeed { - continue - } - client, ok := clients[node.Name] - if !ok { - client, err = node.Client() + select { + case <-ctx.Done(): + return nil, nil, ctx.Err() + case <-timer.C: + for _, node := range testnet.Nodes { + // skip nodes that have reached the target height + if _, ok := nodesAtHeight[node.Name]; ok { + continue + } + + // skip nodes that don't have state or haven't started yet + if node.Stateless() { + continue + } + if !node.HasStarted { + continue + } + + // cache the clients + client, ok := clients[node.Name] + if !ok { + client, err = node.Client() + if err != nil { + continue + } + clients[node.Name] = client + } + + wctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + result, err := client.Status(wctx) if err != nil { continue } - clients[node.Name] = client + if result.SyncInfo.LatestBlockHeight > lastHeight { + lastHeight = result.SyncInfo.LatestBlockHeight + lastIncrease = time.Now() + } + + if result.SyncInfo.LatestBlockHeight >= height { + // the node has achieved the target height! + + // add this node to the set of target + // height nodes + nodesAtHeight[node.Name] = struct{}{} + + // if not all of the nodes that we + // have clients for have reached the + // target height, keep trying. + if numRunningNodes > len(nodesAtHeight) { + continue + } + + // All nodes are at or above the target height. Now fetch the block for that target height + // and return it. We loop again through all clients because some may have pruning set but + // at least two of them should be archive nodes. 
+ for _, c := range clients { + result, err := c.Block(ctx, &height) + if err != nil || result == nil || result.Block == nil { + continue + } + return result.Block, &result.BlockID, err + } + } } - ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) - defer cancel() - result, err := client.Block(ctx, nil) - if err != nil { - continue + if len(clients) == 0 { + return nil, nil, errors.New("unable to connect to any network nodes") } - if result.Block != nil && (maxResult == nil || result.Block.Height > maxResult.Block.Height) { - maxResult = result - lastIncrease = time.Now() - } - if maxResult != nil && maxResult.Block.Height >= height { - return maxResult.Block, &maxResult.BlockID, nil - } - } + if time.Since(lastIncrease) >= time.Minute { + if lastHeight == 0 { + return nil, nil, errors.New("chain stalled at unknown height (most likely upon starting)") + } + + return nil, nil, fmt.Errorf("chain stalled at height %v [%d of %d nodes %+v]", + lastHeight, + len(nodesAtHeight), + numRunningNodes, + nodesAtHeight) - if len(clients) == 0 { - return nil, nil, errors.New("unable to connect to any network nodes") - } - if time.Since(lastIncrease) >= time.Minute { - if maxResult == nil { - return nil, nil, errors.New("chain stalled at unknown height") - } + } - return nil, nil, fmt.Errorf("chain stalled at height %v", maxResult.Block.Height) + timer.Reset(1 * time.Second) } - time.Sleep(1 * time.Second) } } // waitForNode waits for a node to become available and catch up to the given block height. -func waitForNode(node *e2e.Node, height int64, timeout time.Duration) (*rpctypes.ResultStatus, error) { +func waitForNode(ctx context.Context, node *e2e.Node, height int64) (*rpctypes.ResultStatus, error) { if node.Mode == e2e.ModeSeed { return nil, nil } @@ -75,42 +138,91 @@ func waitForNode(node *e2e.Node, height int64, timeout time.Duration) (*rpctypes return nil, err } - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() + timer := time.NewTimer(0) + defer timer.Stop() + var ( + lastFailed bool + counter int + ) for { - status, err := client.Status(ctx) - switch { - case errors.Is(err, context.DeadlineExceeded): - return nil, fmt.Errorf("timed out waiting for %v to reach height %v", node.Name, height) - case errors.Is(err, context.Canceled): - return nil, err - case err == nil && status.SyncInfo.LatestBlockHeight >= height: - return status, nil + counter++ + if lastFailed { + lastFailed = false + + // if there was a problem with the request in + // the previous iteration, recreate the client + // to ensure reconnection + client, err = node.Client() + if err != nil { + return nil, err + } } - time.Sleep(300 * time.Millisecond) + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-timer.C: + status, err := client.Status(ctx) + switch { + case errors.Is(err, context.DeadlineExceeded): + return nil, fmt.Errorf("timed out waiting for %v to reach height %v", node.Name, height) + case errors.Is(err, context.Canceled): + return nil, err + case err == nil && status.SyncInfo.LatestBlockHeight >= height: + return status, nil + case counter%500 == 0: + switch { + case err != nil: + lastFailed = true + logger.Error("node not yet ready", + "iter", counter, + "node", node.Name, + "target", height, + "err", err, + ) + case status != nil: + logger.Info("node not yet ready", + "iter", counter, + "node", node.Name, + "height", status.SyncInfo.LatestBlockHeight, + "target", height, + ) + } + } + timer.Reset(250 * time.Millisecond) + } } } -// waitForAllNodes waits for all
nodes to become available and catch up to the given block height. -func waitForAllNodes(testnet *e2e.Testnet, height int64, timeout time.Duration) (int64, error) { - var lastHeight int64 - +// getLatestBlock returns the last block that all active nodes in the network have +// agreed upon, i.e. the earliest of each node's latest blocks +func getLatestBlock(ctx context.Context, testnet *e2e.Testnet) (*types.Block, error) { + var earliestBlock *types.Block for _, node := range testnet.Nodes { - if node.Mode == e2e.ModeSeed { + // skip nodes that don't have state or haven't started yet + if node.Stateless() { + continue + } + if !node.HasStarted { continue } - status, err := waitForNode(node, height, timeout) + client, err := node.Client() if err != nil { - return 0, err + return nil, err } - if status.SyncInfo.LatestBlockHeight > lastHeight { - lastHeight = status.SyncInfo.LatestBlockHeight + wctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + result, err := client.Block(wctx, nil) + if err != nil { + return nil, err + } + + if result.Block != nil && (earliestBlock == nil || earliestBlock.Height > result.Block.Height) { + earliestBlock = result.Block } } - - return lastHeight, nil + return earliestBlock, nil } diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index c968ef306..9bf76c874 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -237,8 +237,6 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.RPC.ListenAddress = "tcp://0.0.0.0:26657" cfg.RPC.PprofListenAddress = ":6060" cfg.P2P.ExternalAddress = fmt.Sprintf("tcp://%v", node.AddressP2P(false)) - cfg.P2P.AddrBookStrict = false - cfg.P2P.DisableLegacy = node.DisableLegacyP2P cfg.P2P.QueueType = node.QueueType cfg.DBBackend = node.Database cfg.StateSync.DiscoveryTime = 5 * time.Second @@ -296,16 +294,18 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { cfg.Mempool.Version = node.Mempool } + cfg.BlockSync.Enable = true if node.BlockSync == "" { - cfg.FastSyncMode = false - } else { - cfg.BlockSync.Version = node.BlockSync + cfg.BlockSync.Enable = false } - if node.StateSync { + switch node.StateSync { + case e2e.StateSyncP2P: + cfg.StateSync.Enable = true + cfg.StateSync.UseP2P = true + case e2e.StateSyncRPC: cfg.StateSync.Enable = true cfg.StateSync.RPCServers = []string{} - for _, peer := range node.Testnet.ArchiveNodes() { if peer.Name == node.Name { continue @@ -342,17 +342,16 @@ func MakeConfig(node *e2e.Node) (*config.Config, error) { // MakeAppConfig generates an ABCI application config for a node.
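One subtlety in getLatestBlock above (and in the waitForHeight polling loop): a deferred cancel for a context.WithTimeout created inside a for loop only runs when the whole function returns, so the timers accumulate until then. Start, later in this diff, scopes the defer to a single iteration by wrapping the body in a closure; a minimal sketch of that idiom, with a hypothetical ping helper standing in for the per-node RPC:

package main

import (
	"context"
	"fmt"
	"time"
)

// ping stands in for a per-node RPC call such as client.Block.
func ping(ctx context.Context, name string) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(10 * time.Millisecond): // simulated round trip
		return nil
	}
}

func pingAll(ctx context.Context, nodes []string) {
	for _, n := range nodes {
		err := func() error {
			wctx, cancel := context.WithTimeout(ctx, 10*time.Second)
			defer cancel() // runs at the end of this iteration, not of pingAll
			return ping(wctx, n)
		}()
		fmt.Println(n, err)
	}
}

func main() {
	pingAll(context.Background(), []string{"validator01", "validator02"})
}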
func MakeAppConfig(node *e2e.Node) ([]byte, error) { cfg := map[string]interface{}{ - "chain_id": node.Testnet.Name, - "dir": "data/app", - "listen": AppAddressUNIX, - "mode": node.Mode, - "proxy_port": node.ProxyPort, - "protocol": "socket", - "persist_interval": node.PersistInterval, - "snapshot_interval": node.SnapshotInterval, - "retain_blocks": node.RetainBlocks, - "key_type": node.PrivvalKey.Type(), - "disable_legacy_p2p": node.DisableLegacyP2P, + "chain_id": node.Testnet.Name, + "dir": "data/app", + "listen": AppAddressUNIX, + "mode": node.Mode, + "proxy_port": node.ProxyPort, + "protocol": "socket", + "persist_interval": node.PersistInterval, + "snapshot_interval": node.SnapshotInterval, + "retain_blocks": node.RetainBlocks, + "key_type": node.PrivvalKey.Type(), } switch node.ABCIProtocol { case e2e.ProtocolUNIX: diff --git a/test/e2e/runner/start.go b/test/e2e/runner/start.go index 70e496af3..967d2519c 100644 --- a/test/e2e/runner/start.go +++ b/test/e2e/runner/start.go @@ -1,6 +1,7 @@ package main import ( + "context" "fmt" "sort" "time" @@ -8,7 +9,10 @@ import ( e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) -func Start(testnet *e2e.Testnet) error { +func Start(ctx context.Context, testnet *e2e.Testnet) error { + if len(testnet.Nodes) == 0 { + return fmt.Errorf("no nodes in testnet") + } // Nodes are already sorted by name. Sort them by name then startAt, // which gives the overall order startAt, mode, name. @@ -25,12 +29,11 @@ func Start(testnet *e2e.Testnet) error { } return false }) + sort.SliceStable(nodeQueue, func(i, j int) bool { return nodeQueue[i].StartAt < nodeQueue[j].StartAt }) - if len(nodeQueue) == 0 { - return fmt.Errorf("no nodes in testnet") - } + if nodeQueue[0].StartAt > 0 { return fmt.Errorf("no initial nodes in testnet") } @@ -43,42 +46,78 @@ func Start(testnet *e2e.Testnet) error { if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { return err } - if _, err := waitForNode(node, 0, time.Minute); err != nil { + + if err := func() error { + ctx, cancel := context.WithTimeout(ctx, time.Minute) + defer cancel() + + _, err := waitForNode(ctx, node, 0) + return err + }(); err != nil { return err } + node.HasStarted = true logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort)) } + networkHeight := testnet.InitialHeight + // Wait for initial height - logger.Info(fmt.Sprintf("Waiting for initial height %v...", testnet.InitialHeight)) - block, blockID, err := waitForHeight(testnet, testnet.InitialHeight) + logger.Info("Waiting for initial height", + "height", networkHeight, + "nodes", len(testnet.Nodes)-len(nodeQueue), + "pending", len(nodeQueue)) + + block, blockID, err := waitForHeight(ctx, testnet, networkHeight) if err != nil { return err } - // Update any state sync nodes with a trusted height and hash for _, node := range nodeQueue { - if node.StateSync || node.Mode == e2e.ModeLight { + if node.StartAt > networkHeight { + // if we're starting a node that's ahead of + // the last known height of the network, then + // we should make sure that the rest of the + // network has reached at least the height + // that this node will start at before we + // start the node. 
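Start above builds its launch order with successive stable sorts: the slice arrives sorted by name, is stable-sorted by mode precedence, and is then stable-sorted by StartAt, which yields the overall key order startAt, mode, name. A minimal sketch of that least-significant-key-first technique (the mode ranking here is an assumption, not the exact comparator from the diff):

package main

import (
	"fmt"
	"sort"
)

type node struct {
	name    string
	mode    string
	startAt int64
}

func main() {
	nodes := []node{
		{"validator02", "validator", 0},
		{"full01", "full", 1000},
		{"seed01", "seed", 0},
		{"validator01", "validator", 0},
	}
	rank := map[string]int{"validator": 0, "full": 1, "seed": 2} // assumed precedence
	// least-significant key first: name, then mode, then startAt
	sort.SliceStable(nodes, func(i, j int) bool { return nodes[i].name < nodes[j].name })
	sort.SliceStable(nodes, func(i, j int) bool { return rank[nodes[i].mode] < rank[nodes[j].mode] })
	sort.SliceStable(nodes, func(i, j int) bool { return nodes[i].startAt < nodes[j].startAt })
	fmt.Println(nodes) // ordered by startAt, then mode, then name
}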
+ + logger.Info("Waiting for network to advance to height", + "node", node.Name, + "last_height", networkHeight, + "waiting_for", node.StartAt, + "size", len(testnet.Nodes)-len(nodeQueue), + "pending", len(nodeQueue)) + + networkHeight = node.StartAt + + block, blockID, err = waitForHeight(ctx, testnet, networkHeight) + if err != nil { + return err + } + } + + // Update any state sync nodes with a trusted height and hash + if node.StateSync != e2e.StateSyncDisabled || node.Mode == e2e.ModeLight { err = UpdateConfigStateSync(node, block.Height, blockID.Hash.Bytes()) if err != nil { return err } } - } - // Start up remaining nodes - for _, node := range nodeQueue { - logger.Info(fmt.Sprintf("Starting node %v at height %v...", node.Name, node.StartAt)) - if _, _, err := waitForHeight(testnet, node.StartAt); err != nil { - return err - } if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil { return err } - status, err := waitForNode(node, node.StartAt, 8*time.Minute) + + wctx, wcancel := context.WithTimeout(ctx, 8*time.Minute) + status, err := waitForNode(wctx, node, node.StartAt) if err != nil { + wcancel() return err } + wcancel() + + node.HasStarted = true logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v at height %v", node.Name, node.ProxyPort, status.SyncInfo.LatestBlockHeight)) } diff --git a/test/e2e/runner/test.go b/test/e2e/runner/test.go index 834ce6f2d..ac24b0cd2 100644 --- a/test/e2e/runner/test.go +++ b/test/e2e/runner/test.go @@ -15,5 +15,5 @@ func Test(testnet *e2e.Testnet) error { return err } - return execVerbose("go", "test", "-count", "1", "./tests/...") + return execVerbose("./build/tests", "-test.count", "1") } diff --git a/test/e2e/runner/wait.go b/test/e2e/runner/wait.go index 9f3a4c438..e3f955071 100644 --- a/test/e2e/runner/wait.go +++ b/test/e2e/runner/wait.go @@ -1,31 +1,27 @@ package main import ( + "context" "fmt" - "time" e2e "github.com/tendermint/tendermint/test/e2e/pkg" ) // Wait waits for a number of blocks to be produced, and for all nodes to catch // up with it. -func Wait(testnet *e2e.Testnet, blocks int64) error { - block, _, err := waitForHeight(testnet, 0) +func Wait(ctx context.Context, testnet *e2e.Testnet, blocks int64) error { + block, err := getLatestBlock(ctx, testnet) if err != nil { return err } - return WaitUntil(testnet, block.Height+blocks) + return WaitUntil(ctx, testnet, block.Height+blocks) } // WaitUntil waits until a given height has been reached. -func WaitUntil(testnet *e2e.Testnet, height int64) error { +func WaitUntil(ctx context.Context, testnet *e2e.Testnet, height int64) error { logger.Info(fmt.Sprintf("Waiting for all nodes to reach height %v...", height)) - _, err := waitForAllNodes(testnet, height, waitingTime(len(testnet.Nodes))) + + _, _, err := waitForHeight(ctx, testnet, height) + return err } - -// waitingTime estimates how long it should take for a node to reach the height. -// More nodes in a network implies we may expect a slower network and may have to wait longer. 
-func waitingTime(nodes int) time.Duration { - return time.Minute + (time.Duration(nodes) * (30 * time.Second)) -} diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 08710f168..ab6f9739e 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -2,6 +2,7 @@ package e2e_test import ( "bytes" + "context" "fmt" "math/rand" "testing" @@ -10,6 +11,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/rpc/client/http" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) @@ -17,9 +19,6 @@ import ( // Tests that any initial state given in genesis has made it into the app. func TestApp_InitialState(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { - if node.Stateless() { - return - } if len(node.Testnet.InitialState) == 0 { return } @@ -39,71 +38,125 @@ func TestApp_InitialState(t *testing.T) { // block and the node sync status. func TestApp_Hash(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { - if node.Mode == e2e.ModeSeed { - return - } - client, err := node.Client() require.NoError(t, err) info, err := client.ABCIInfo(ctx) require.NoError(t, err) require.NotEmpty(t, info.Response.LastBlockAppHash, "expected app to return app hash") - block, err := client.Block(ctx, nil) - require.NoError(t, err) - require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash, - "app hash does not match last block's app hash") - status, err := client.Status(ctx) require.NoError(t, err) - require.EqualValues(t, info.Response.LastBlockAppHash, status.SyncInfo.LatestAppHash, - "app hash does not match node status") + + block, err := client.Block(ctx, &info.Response.LastBlockHeight) + require.NoError(t, err) + + if info.Response.LastBlockHeight == block.Block.Height { + require.EqualValues(t, info.Response.LastBlockAppHash, block.Block.AppHash.Bytes(), + "app hash does not match last block's app hash") + } + + require.True(t, status.SyncInfo.LatestBlockHeight >= info.Response.LastBlockHeight, + "status out of sync with application") }) } // Tests that we can set a value and retrieve it. func TestApp_Tx(t *testing.T) { - testNode(t, func(t *testing.T, node e2e.Node) { - if node.Mode == e2e.ModeSeed { - return + type broadcastFunc func(context.Context, types.Tx) error + + testCases := []struct { + Name string + WaitTime time.Duration + BroadcastTx func(client *http.HTTP) broadcastFunc + ShouldSkip bool + }{ + { + Name: "Sync", + WaitTime: time.Minute, + BroadcastTx: func(client *http.HTTP) broadcastFunc { + return func(ctx context.Context, tx types.Tx) error { + _, err := client.BroadcastTxSync(ctx, tx) + return err + } + }, + }, + { + Name: "Commit", + WaitTime: 15 * time.Second, + // TODO: turn this check back on if it can + // return reliably. Currently these calls have + // a hard timeout of 10s (server side + // configured). The Sync check is probably + // safe. + ShouldSkip: true, + BroadcastTx: func(client *http.HTTP) broadcastFunc { + return func(ctx context.Context, tx types.Tx) error { + _, err := client.BroadcastTxCommit(ctx, tx) + return err + } + }, + }, + { + Name: "Async", + WaitTime: 90 * time.Second, + // TODO: turn this check back on if there's a + // way to avoid failures in the case that the + // transaction doesn't make it into the + // mempool. (retries?) 
+ ShouldSkip: true, + BroadcastTx: func(client *http.HTTP) broadcastFunc { + return func(ctx context.Context, tx types.Tx) error { + _, err := client.BroadcastTxAsync(ctx, tx) + return err + } + }, + }, + } + + for idx, test := range testCases { + if test.ShouldSkip { + continue } + t.Run(test.Name, func(t *testing.T) { + // testNode calls t.Parallel as well, so we should + // have a copy of the test case in local scope + test := testCases[idx] + testNode(t, func(t *testing.T, node e2e.Node) { + client, err := node.Client() + require.NoError(t, err) - client, err := node.Client() - require.NoError(t, err) + // Generate a random value, to prevent duplicate tx errors when + // manually running the test multiple times for a testnet. + bz := make([]byte, 32) + _, err = rand.Read(bz) + require.NoError(t, err) - // Generate a random value, to prevent duplicate tx errors when - // manually running the test multiple times for a testnet. - r := rand.New(rand.NewSource(time.Now().UnixNano())) - bz := make([]byte, 32) - _, err = r.Read(bz) - require.NoError(t, err) + key := fmt.Sprintf("testapp-tx-%v", node.Name) + value := fmt.Sprintf("%x", bz) + tx := types.Tx(fmt.Sprintf("%v=%v", key, value)) - key := fmt.Sprintf("testapp-tx-%v", node.Name) - value := fmt.Sprintf("%x", bz) - tx := types.Tx(fmt.Sprintf("%v=%v", key, value)) + require.NoError(t, test.BroadcastTx(client)(ctx, tx)) - _, err = client.BroadcastTxSync(ctx, tx) - require.NoError(t, err) + hash := tx.Hash() - hash := tx.Hash() - waitTime := 20 * time.Second + require.Eventuallyf(t, func() bool { + txResp, err := client.Tx(ctx, hash, false) + return err == nil && bytes.Equal(txResp.Tx, tx) + }, + test.WaitTime, // timeout + time.Second, // interval + "submitted tx %X wasn't committed after %v", + hash, test.WaitTime, + ) - require.Eventuallyf(t, func() bool { - txResp, err := client.Tx(ctx, hash, false) - return err == nil && bytes.Equal(txResp.Tx, tx) - }, waitTime, time.Second, - "submitted tx %X wasn't committed after %v", hash, waitTime, - ) + abciResp, err := client.ABCIQuery(ctx, "", []byte(key)) + require.NoError(t, err) + assert.Equal(t, key, string(abciResp.Response.Key)) + assert.Equal(t, value, string(abciResp.Response.Value)) + }) - // NOTE: we don't test abci query of the light client - if node.Mode == e2e.ModeLight { - return - } + }) - abciResp, err := client.ABCIQuery(ctx, "", []byte(key)) - require.NoError(t, err) - assert.Equal(t, key, string(abciResp.Response.Key)) - assert.Equal(t, value, string(abciResp.Response.Value)) + } - }) } diff --git a/test/e2e/tests/block_test.go b/test/e2e/tests/block_test.go index 21aeeda99..f83cf3757 100644 --- a/test/e2e/tests/block_test.go +++ b/test/e2e/tests/block_test.go @@ -13,10 +13,6 @@ import ( func TestBlock_Header(t *testing.T) { blocks := fetchBlockChain(t) testNode(t, func(t *testing.T, node e2e.Node) { - if node.Mode == e2e.ModeSeed { - return - } - client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) @@ -34,7 +30,7 @@ func TestBlock_Header(t *testing.T) { } // the first blocks after state sync come from the backfill process // and are therefore not complete - if node.StateSync && block.Header.Height <= first+e2e.EvidenceAgeHeight+1 { + if node.StateSync != e2e.StateSyncDisabled && block.Header.Height <= first+e2e.EvidenceAgeHeight+1 { continue } if block.Header.Height > last { @@ -55,10 +51,6 @@ func TestBlock_Header(t *testing.T) { // Tests that the node contains the expected block range.
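TestApp_Tx above waits for commitment with require.Eventuallyf, polling until the transaction is retrievable instead of sleeping for a fixed period. A minimal sketch of that polling shape, with a hypothetical committed check standing in for the client.Tx lookup:

package main

import (
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestEventuallyCommitted(t *testing.T) {
	polls := 0
	committed := func() bool {
		polls++
		return polls >= 3 // pretend the tx lands on the third poll
	}
	require.Eventuallyf(t, committed,
		5*time.Second,        // total budget, like test.WaitTime
		100*time.Millisecond, // poll interval
		"tx wasn't committed after %v", 5*time.Second,
	)
}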
func TestBlock_Range(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { - if node.Mode == e2e.ModeSeed { - return - } - client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) @@ -70,7 +62,7 @@ func TestBlock_Range(t *testing.T) { switch { // if the node state synced we ignore any assertions because it's hard to know how far back // the node ran reverse sync for - case node.StateSync: + case node.StateSync != e2e.StateSyncDisabled: break case node.RetainBlocks > 0 && int64(node.RetainBlocks) < (last-node.Testnet.InitialHeight+1): // Delta handles race conditions in reading first/last heights. @@ -83,7 +75,7 @@ func TestBlock_Range(t *testing.T) { } for h := first; h <= last; h++ { - if node.StateSync && h <= first+e2e.EvidenceAgeHeight+1 { + if node.StateSync != e2e.StateSyncDisabled && h <= first+e2e.EvidenceAgeHeight+1 { continue } resp, err := client.Block(ctx, &(h)) diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index 15c747b5b..a645fd7c1 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -3,14 +3,13 @@ package e2e_test import ( "context" "os" - "path/filepath" "sync" "testing" "github.com/stretchr/testify/require" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - rpctypes "github.com/tendermint/tendermint/rpc/core/types" + rpctypes "github.com/tendermint/tendermint/rpc/coretypes" e2e "github.com/tendermint/tendermint/test/e2e/pkg" "github.com/tendermint/tendermint/types" ) @@ -30,8 +29,9 @@ var ( blocksCacheMtx = sync.Mutex{} ) -// testNode runs tests for testnet nodes. The callback function is given a -// single node to test, running as a subtest in parallel with other subtests. +// testNode runs tests for testnet nodes. The callback function is +// given a single stateful node to test, running as a subtest in +// parallel with other subtests. // // The testnet manifest must be given as the envvar E2E_MANIFEST. If not set, // these tests are skipped so that they're not picked up during normal unit @@ -51,6 +51,11 @@ func testNode(t *testing.T, testFunc func(*testing.T, e2e.Node)) { for _, node := range nodes { node := *node + + if node.Stateless() { + continue + } + t.Run(node.Name, func(t *testing.T) { t.Parallel() testFunc(t, node) @@ -66,9 +71,6 @@ func loadTestnet(t *testing.T) e2e.Testnet { if manifest == "" { t.Skip("E2E_MANIFEST not set, not an end-to-end test run") } - if !filepath.IsAbs(manifest) { - manifest = filepath.Join("..", manifest) - } testnetCacheMtx.Lock() defer testnetCacheMtx.Unlock() diff --git a/test/e2e/tests/net_test.go b/test/e2e/tests/net_test.go index 1ca43fa05..e6ff27a0e 100644 --- a/test/e2e/tests/net_test.go +++ b/test/e2e/tests/net_test.go @@ -14,11 +14,6 @@ func TestNet_Peers(t *testing.T) { t.SkipNow() testNode(t, func(t *testing.T, node e2e.Node) { - // Seed nodes shouldn't necessarily mesh with the entire network. 
- if node.Mode == e2e.ModeSeed { - return - } - client, err := node.Client() require.NoError(t, err) netInfo, err := client.NetInfo(ctx) @@ -32,11 +27,12 @@ func TestNet_Peers(t *testing.T) { seen[n.Name] = (n.Name == node.Name) // we've clearly seen ourself } for _, peerInfo := range netInfo.Peers { - peer := node.Testnet.LookupNode(peerInfo.NodeInfo.Moniker) - require.NotNil(t, peer, "unknown node %v", peerInfo.NodeInfo.Moniker) - require.Equal(t, peer.IP.String(), peerInfo.RemoteIP, - "unexpected IP address for peer %v", peer.Name) - seen[peerInfo.NodeInfo.Moniker] = true + id := peerInfo.ID + peer := node.Testnet.LookupNode(string(id)) + require.NotNil(t, peer, "unknown node %v", id) + require.Contains(t, peerInfo.URL, peer.IP.String(), + "unexpected IP address for peer %v", id) + seen[string(id)] = true } for name := range seen { diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index 847a8d388..8292e86ee 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -14,16 +14,20 @@ import ( // scheduled validator updates. func TestValidator_Sets(t *testing.T) { testNode(t, func(t *testing.T, node e2e.Node) { - if node.Mode == e2e.ModeSeed { - return - } - client, err := node.Client() require.NoError(t, err) status, err := client.Status(ctx) require.NoError(t, err) first := status.SyncInfo.EarliestBlockHeight + + // for nodes that have to catch up, we should only + // check the validator sets for nodes after this + // point, to avoid inconsistencies with backfill. + if node.StartAt > first { + first = node.StartAt + } + last := status.SyncInfo.LatestBlockHeight // skip first block if node is pruning blocks, to avoid race conditions diff --git a/test/fuzz/mempool/v0/checktx.go b/test/fuzz/mempool/v0/checktx.go index a90ec2290..62eda9729 100644 --- a/test/fuzz/mempool/v0/checktx.go +++ b/test/fuzz/mempool/v0/checktx.go @@ -3,19 +3,19 @@ package v0 import ( "context" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" - "github.com/tendermint/tendermint/proxy" ) var mp mempool.Mempool func init() { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - appConnMem, _ := cc.NewABCIClient() + cc := abciclient.NewLocalCreator(app) + appConnMem, _ := cc() err := appConnMem.Start() if err != nil { panic(err) diff --git a/test/fuzz/mempool/v1/checktx.go b/test/fuzz/mempool/v1/checktx.go index 6194f3bcb..2ed0b97ff 100644 --- a/test/fuzz/mempool/v1/checktx.go +++ b/test/fuzz/mempool/v1/checktx.go @@ -3,19 +3,19 @@ package v1 import ( "context" + abciclient "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" mempoolv1 "github.com/tendermint/tendermint/internal/mempool/v0" - "github.com/tendermint/tendermint/proxy" ) var mp mempool.Mempool func init() { app := kvstore.NewApplication() - cc := proxy.NewLocalClientCreator(app) - appConnMem, _ := cc.NewABCIClient() + cc := abciclient.NewLocalCreator(app) + appConnMem, _ := cc() err := appConnMem.Start() if err != nil { panic(err) diff --git a/test/fuzz/p2p/addrbook/fuzz.go b/test/fuzz/p2p/addrbook/fuzz.go deleted file mode 100644 index 6d5548fc7..000000000 --- a/test/fuzz/p2p/addrbook/fuzz.go +++ /dev/null @@ -1,35 
+0,0 @@ -// nolint: gosec -package addrbook - -import ( - "encoding/json" - "fmt" - "math/rand" - - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" -) - -var addrBook = pex.NewAddrBook("./testdata/addrbook.json", true) - -func Fuzz(data []byte) int { - addr := new(p2p.NetAddress) - if err := json.Unmarshal(data, addr); err != nil { - return -1 - } - - // Fuzz AddAddress. - err := addrBook.AddAddress(addr, addr) - if err != nil { - return 0 - } - - // Also, make sure PickAddress always returns a non-nil address. - bias := rand.Intn(100) - if p := addrBook.PickAddress(bias); p == nil { - panic(fmt.Sprintf("picked a nil address (bias: %d, addrBook size: %v)", - bias, addrBook.Size())) - } - - return 1 -} diff --git a/test/fuzz/p2p/addrbook/fuzz_test.go b/test/fuzz/p2p/addrbook/fuzz_test.go deleted file mode 100644 index 4ec7aebd9..000000000 --- a/test/fuzz/p2p/addrbook/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package addrbook_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/addrbook" -) - -const testdataCasesDir = "testdata/cases" - -func TestAddrbookTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - addrbook.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/addrbook/init-corpus/main.go b/test/fuzz/p2p/addrbook/init-corpus/main.go deleted file mode 100644 index 1166f9bd7..000000000 --- a/test/fuzz/p2p/addrbook/init-corpus/main.go +++ /dev/null @@ -1,59 +0,0 @@ -// nolint: gosec -package main - -import ( - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "log" - "net" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(baseDir string) { - log.SetFlags(0) - - // create "corpus" directory - corpusDir := filepath.Join(baseDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - - // create corpus - privKey := ed25519.GenPrivKey() - addrs := []*p2p.NetAddress{ - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(0, 0, 0, 0), Port: 0}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(127, 0, 0, 0), Port: 80}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(213, 87, 10, 200), Port: 8808}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.IPv4(111, 111, 111, 111), Port: 26656}, - {ID: types.NodeIDFromPubKey(privKey.PubKey()), IP: net.ParseIP("2001:db8::68"), Port: 26656}, - } - - for i, addr := range addrs { - filename := filepath.Join(corpusDir, fmt.Sprintf("%d.json", i)) - - bz, err := json.Marshal(addr) - if err != nil { - log.Fatalf("can't marshal %v: %v", addr, err) - } - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %v to %q: %v", addr, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git 
a/test/fuzz/p2p/addrbook/testdata/cases/empty b/test/fuzz/p2p/addrbook/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/fuzz/p2p/pex/fuzz_test.go b/test/fuzz/p2p/pex/fuzz_test.go deleted file mode 100644 index 8a194e730..000000000 --- a/test/fuzz/p2p/pex/fuzz_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package pex_test - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/test/fuzz/p2p/pex" -) - -const testdataCasesDir = "testdata/cases" - -func TestPexTestdataCases(t *testing.T) { - entries, err := os.ReadDir(testdataCasesDir) - require.NoError(t, err) - - for _, e := range entries { - entry := e - t.Run(entry.Name(), func(t *testing.T) { - defer func() { - r := recover() - require.Nilf(t, r, "testdata/cases test panic") - }() - f, err := os.Open(filepath.Join(testdataCasesDir, entry.Name())) - require.NoError(t, err) - input, err := ioutil.ReadAll(f) - require.NoError(t, err) - pex.Fuzz(input) - }) - } -} diff --git a/test/fuzz/p2p/pex/init-corpus/main.go b/test/fuzz/p2p/pex/init-corpus/main.go deleted file mode 100644 index e90216864..000000000 --- a/test/fuzz/p2p/pex/init-corpus/main.go +++ /dev/null @@ -1,84 +0,0 @@ -// nolint: gosec -package main - -import ( - "flag" - "fmt" - "io/ioutil" - "log" - "math/rand" - "os" - "path/filepath" - - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -func main() { - baseDir := flag.String("base", ".", `where the "corpus" directory will live`) - flag.Parse() - - initCorpus(*baseDir) -} - -func initCorpus(rootDir string) { - log.SetFlags(0) - - corpusDir := filepath.Join(rootDir, "corpus") - if err := os.MkdirAll(corpusDir, 0755); err != nil { - log.Fatalf("Creating %q err: %v", corpusDir, err) - } - sizes := []int{0, 1, 2, 17, 5, 31} - - // Make the PRNG predictable - rand.Seed(10) - - for _, n := range sizes { - var addrs []*p2p.NetAddress - - // IPv4 addresses - for i := 0; i < n; i++ { - privKey := ed25519.GenPrivKey() - addr := fmt.Sprintf( - "%s@%v.%v.%v.%v:26656", - types.NodeIDFromPubKey(privKey.PubKey()), - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - rand.Int()%256, - ) - netAddr, _ := types.NewNetAddressString(addr) - addrs = append(addrs, netAddr) - } - - // IPv6 addresses - privKey := ed25519.GenPrivKey() - ipv6a, err := types.NewNetAddressString( - fmt.Sprintf("%s@[ff02::1:114]:26656", types.NodeIDFromPubKey(privKey.PubKey()))) - if err != nil { - log.Fatalf("can't create a new netaddress: %v", err) - } - addrs = append(addrs, ipv6a) - - msg := tmp2p.PexMessage{ - Sum: &tmp2p.PexMessage_PexResponse{ - PexResponse: &tmp2p.PexResponse{Addresses: pex.NetAddressesToProto(addrs)}, - }, - } - bz, err := msg.Marshal() - if err != nil { - log.Fatalf("unable to marshal: %v", err) - } - - filename := filepath.Join(rootDir, "corpus", fmt.Sprintf("%d", n)) - - if err := ioutil.WriteFile(filename, bz, 0644); err != nil { - log.Fatalf("can't write %X to %q: %v", bz, filename, err) - } - - log.Printf("wrote %q", filename) - } -} diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go deleted file mode 100644 index 388361a4e..000000000 --- a/test/fuzz/p2p/pex/reactor_receive.go +++ /dev/null @@ -1,95 +0,0 @@ -package pex - -import ( - "net" - - 
"github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/internal/p2p/pex" - "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" -) - -var ( - pexR *pex.Reactor - peer p2p.Peer - logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) -) - -func init() { - addrB := pex.NewAddrBook("./testdata/addrbook1", false) - pexR = pex.NewReactor(addrB, &pex.ReactorConfig{SeedMode: false}) - pexR.SetLogger(logger) - peer = newFuzzPeer() - pexR.AddPeer(peer) - - cfg := config.DefaultP2PConfig() - cfg.PexReactor = true - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - return sw - }, logger) - pexR.SetSwitch(sw) -} - -func Fuzz(data []byte) int { - if len(data) == 0 { - return -1 - } - - pexR.Receive(pex.PexChannel, peer, data) - - if !peer.IsRunning() { - // do not increase priority for msgs which lead to peer being stopped - return 0 - } - - return 1 -} - -type fuzzPeer struct { - *service.BaseService - m map[string]interface{} -} - -var _ p2p.Peer = (*fuzzPeer)(nil) - -func newFuzzPeer() *fuzzPeer { - fp := &fuzzPeer{m: make(map[string]interface{})} - fp.BaseService = service.NewBaseService(nil, "fuzzPeer", fp) - return fp -} - -var privKey = ed25519.GenPrivKey() -var nodeID = types.NodeIDFromPubKey(privKey.PubKey()) -var defaultNodeInfo = types.NodeInfo{ - ProtocolVersion: types.ProtocolVersion{ - P2P: version.P2PProtocol, - Block: version.BlockProtocol, - App: 0, - }, - NodeID: nodeID, - ListenAddr: "127.0.0.1:0", - Moniker: "foo1", -} - -func (fp *fuzzPeer) FlushStop() {} -func (fp *fuzzPeer) ID() types.NodeID { return nodeID } -func (fp *fuzzPeer) RemoteIP() net.IP { return net.IPv4(198, 163, 190, 214) } -func (fp *fuzzPeer) RemoteAddr() net.Addr { - return &net.TCPAddr{IP: fp.RemoteIP(), Port: 26656, Zone: ""} -} -func (fp *fuzzPeer) IsOutbound() bool { return false } -func (fp *fuzzPeer) IsPersistent() bool { return false } -func (fp *fuzzPeer) CloseConn() error { return nil } -func (fp *fuzzPeer) NodeInfo() types.NodeInfo { return defaultNodeInfo } -func (fp *fuzzPeer) Status() p2p.ConnectionStatus { var cs p2p.ConnectionStatus; return cs } -func (fp *fuzzPeer) SocketAddr() *p2p.NetAddress { - return types.NewNetAddress(fp.ID(), fp.RemoteAddr()) -} -func (fp *fuzzPeer) Send(byte, []byte) bool { return true } -func (fp *fuzzPeer) TrySend(byte, []byte) bool { return true } -func (fp *fuzzPeer) Set(key string, value interface{}) { fp.m[key] = value } -func (fp *fuzzPeer) Get(key string) interface{} { return fp.m[key] } diff --git a/test/fuzz/p2p/pex/testdata/addrbook1 b/test/fuzz/p2p/pex/testdata/addrbook1 deleted file mode 100644 index acf3e721d..000000000 --- a/test/fuzz/p2p/pex/testdata/addrbook1 +++ /dev/null @@ -1,1705 +0,0 @@ -{ - "Key": "badd73ebd4eeafbaefc01e0c", - "Addrs": [ - { - "Addr": { - "IP": "233.174.138.192", - "Port": 48186 - }, - "Src": { - "IP": "198.37.90.115", - "Port": 29492 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692278-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 55 - ] - }, - { - "Addr": { - "IP": "181.28.96.104", - "Port": 26776 - }, - "Src": { - "IP": "183.12.35.241", - "Port": 26794 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692289-07:00", - "LastSuccess": 
"0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "141.85.194.118", - "Port": 39768 - }, - "Src": { - "IP": "120.130.90.63", - "Port": 61750 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692383-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 185 - ] - }, - { - "Addr": { - "IP": "167.72.9.155", - "Port": 9542 - }, - "Src": { - "IP": "95.158.40.108", - "Port": 14929 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692604-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 250 - ] - }, - { - "Addr": { - "IP": "124.118.94.27", - "Port": 50333 - }, - "Src": { - "IP": "208.169.57.96", - "Port": 19754 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692046-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 0 - ] - }, - { - "Addr": { - "IP": "158.197.4.226", - "Port": 25979 - }, - "Src": { - "IP": "3.129.219.107", - "Port": 50374 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692211-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "170.42.135.37", - "Port": 34524 - }, - "Src": { - "IP": "73.125.53.212", - "Port": 49691 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692241-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 14 - ] - }, - { - "Addr": { - "IP": "234.69.254.147", - "Port": 31885 - }, - "Src": { - "IP": "167.106.61.34", - "Port": 22187 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692609-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 213 - ] - }, - { - "Addr": { - "IP": "32.176.173.90", - "Port": 17250 - }, - "Src": { - "IP": "118.91.243.12", - "Port": 26781 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692273-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 35 - ] - }, - { - "Addr": { - "IP": "162.154.114.145", - "Port": 13875 - }, - "Src": { - "IP": "198.178.108.166", - "Port": 59623 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692373-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 216 - ] - }, - { - "Addr": { - "IP": "67.128.167.93", - "Port": 50513 - }, - "Src": { - "IP": "104.93.115.28", - "Port": 48298 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692399-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 18 - ] - }, - { - "Addr": { - "IP": "132.175.221.206", - "Port": 61037 - }, - "Src": { - "IP": "112.49.189.65", - "Port": 56186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692422-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 31 - ] - }, - { - "Addr": { - "IP": "155.49.24.238", - "Port": 26261 - }, - "Src": { - "IP": "97.10.121.246", - "Port": 8694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692473-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 23 - ] - }, - { - "Addr": { - "IP": "22.215.7.233", - "Port": 32487 - }, - "Src": { - "IP": "214.236.105.23", - "Port": 26870 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692572-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 255 - ] - }, - { - "Addr": { - "IP": "253.170.228.231", - "Port": 5002 - }, - "Src": { - "IP": "225.49.137.209", - "Port": 16908 - }, - "Attempts": 
0, - "LastAttempt": "2017-11-07T01:11:34.692619-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 161 - ] - }, - { - "Addr": { - "IP": "162.126.204.39", - "Port": 62618 - }, - "Src": { - "IP": "250.214.168.131", - "Port": 3237 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69203-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 7 - ] - }, - { - "Addr": { - "IP": "83.154.228.215", - "Port": 23508 - }, - "Src": { - "IP": "66.33.77.170", - "Port": 52207 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692153-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "132.49.63.65", - "Port": 53651 - }, - "Src": { - "IP": "250.164.163.212", - "Port": 8612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692253-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 98 - ] - }, - { - "Addr": { - "IP": "200.168.34.12", - "Port": 61901 - }, - "Src": { - "IP": "133.185.186.115", - "Port": 14186 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692488-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 214 - ] - }, - { - "Addr": { - "IP": "31.93.45.219", - "Port": 61036 - }, - "Src": { - "IP": "176.191.214.170", - "Port": 33402 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692024-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 68 - ] - }, - { - "Addr": { - "IP": "250.189.27.93", - "Port": 51665 - }, - "Src": { - "IP": "93.161.116.107", - "Port": 53482 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692196-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 42 - ] - }, - { - "Addr": { - "IP": "50.7.17.126", - "Port": 64300 - }, - "Src": { - "IP": "233.234.64.214", - "Port": 61061 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692444-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 13 - ] - }, - { - "Addr": { - "IP": "88.85.81.64", - "Port": 34834 - }, - "Src": { - "IP": "4.240.150.250", - "Port": 63064 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692248-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 195 - ] - }, - { - "Addr": { - "IP": "242.117.244.198", - "Port": 4363 - }, - "Src": { - "IP": "149.29.34.42", - "Port": 62567 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692263-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "245.155.175.114", - "Port": 37262 - }, - "Src": { - "IP": "75.85.36.49", - "Port": 7101 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692313-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "224.184.241.26", - "Port": 55870 - }, - "Src": { - "IP": "52.15.194.216", - "Port": 4733 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692327-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "43.178.26.188", - "Port": 55914 - }, - "Src": { - "IP": "103.250.250.35", - "Port": 1566 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692577-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "102.117.172.117", - "Port": 35855 - }, - 
"Src": { - "IP": "114.152.204.187", - "Port": 21156 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692158-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "39.33.41.199", - "Port": 51600 - }, - "Src": { - "IP": "119.65.88.38", - "Port": 41239 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692188-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 24 - ] - }, - { - "Addr": { - "IP": "63.164.56.227", - "Port": 1660 - }, - "Src": { - "IP": "169.54.47.92", - "Port": 2818 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692227-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 10 - ] - }, - { - "Addr": { - "IP": "50.183.223.115", - "Port": 26910 - }, - "Src": { - "IP": "115.98.199.4", - "Port": 8767 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692201-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 65 - ] - }, - { - "Addr": { - "IP": "132.94.203.167", - "Port": 53156 - }, - "Src": { - "IP": "17.195.234.168", - "Port": 29405 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692294-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "135.194.230.212", - "Port": 14340 - }, - "Src": { - "IP": "160.2.241.10", - "Port": 36553 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692363-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 63 - ] - }, - { - "Addr": { - "IP": "116.53.200.25", - "Port": 27092 - }, - "Src": { - "IP": "219.104.163.247", - "Port": 50476 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692543-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "125.77.44.185", - "Port": 55291 - }, - "Src": { - "IP": "77.15.232.117", - "Port": 6934 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692589-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 8 - ] - }, - { - "Addr": { - "IP": "27.221.35.172", - "Port": 26418 - }, - "Src": { - "IP": "252.18.49.70", - "Port": 9835 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692068-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 80 - ] - }, - { - "Addr": { - "IP": "133.225.167.135", - "Port": 59468 - }, - "Src": { - "IP": "110.223.163.74", - "Port": 22576 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69213-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 164 - ] - }, - { - "Addr": { - "IP": "155.131.178.240", - "Port": 60476 - }, - "Src": { - "IP": "143.82.157.1", - "Port": 43821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692173-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - }, - { - "Addr": { - "IP": "207.13.48.52", - "Port": 28549 - }, - "Src": { - "IP": "238.224.177.29", - "Port": 44100 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692594-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 113 - ] - }, - { - "Addr": { - "IP": "91.137.2.184", - "Port": 44887 - }, - "Src": { - "IP": "72.131.70.84", - "Port": 29960 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692627-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - 
{ - "Addr": { - "IP": "169.59.252.76", - "Port": 57711 - }, - "Src": { - "IP": "194.132.91.119", - "Port": 18037 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692478-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 124 - ] - }, - { - "Addr": { - "IP": "25.174.143.229", - "Port": 41540 - }, - "Src": { - "IP": "58.215.132.148", - "Port": 64950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692534-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 88 - ] - }, - { - "Addr": { - "IP": "71.239.78.239", - "Port": 46938 - }, - "Src": { - "IP": "156.98.186.169", - "Port": 32046 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692116-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 154 - ] - }, - { - "Addr": { - "IP": "94.137.107.61", - "Port": 20756 - }, - "Src": { - "IP": "101.201.138.179", - "Port": 22877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692414-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 233 - ] - }, - { - "Addr": { - "IP": "216.62.174.112", - "Port": 60162 - }, - "Src": { - "IP": "225.114.119.144", - "Port": 1575 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692464-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 132 - ] - }, - { - "Addr": { - "IP": "65.183.81.125", - "Port": 17511 - }, - "Src": { - "IP": "12.96.14.61", - "Port": 42308 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692308-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 153 - ] - }, - { - "Addr": { - "IP": "142.26.87.52", - "Port": 41967 - }, - "Src": { - "IP": "60.124.157.139", - "Port": 20727 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692321-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 29 - ] - }, - { - "Addr": { - "IP": "13.77.198.44", - "Port": 54508 - }, - "Src": { - "IP": "142.73.70.174", - "Port": 19525 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692553-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 170 - ] - }, - { - "Addr": { - "IP": "63.192.219.12", - "Port": 46603 - }, - "Src": { - "IP": "26.136.66.29", - "Port": 38924 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692558-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 203 - ] - }, - { - "Addr": { - "IP": "120.82.251.151", - "Port": 43723 - }, - "Src": { - "IP": "136.104.122.219", - "Port": 47452 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692599-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 103 - ] - }, - { - "Addr": { - "IP": "74.79.96.159", - "Port": 46646 - }, - "Src": { - "IP": "218.60.242.116", - "Port": 5361 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692145-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "194.65.211.174", - "Port": 43464 - }, - "Src": { - "IP": "87.5.112.153", - "Port": 56348 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692163-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "237.158.179.80", - "Port": 32231 - }, - "Src": { - "IP": "210.240.52.244", - "Port": 29142 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692183-07:00", - 
"LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 27 - ] - }, - { - "Addr": { - "IP": "81.157.122.4", - "Port": 9917 - }, - "Src": { - "IP": "213.226.144.152", - "Port": 29950 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692614-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 33 - ] - }, - { - "Addr": { - "IP": "180.147.73.220", - "Port": 367 - }, - "Src": { - "IP": "32.229.253.215", - "Port": 62165 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692529-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 240 - ] - }, - { - "Addr": { - "IP": "83.110.235.17", - "Port": 33231 - }, - "Src": { - "IP": "230.54.162.85", - "Port": 51569 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692563-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 234 - ] - }, - { - "Addr": { - "IP": "100.252.20.2", - "Port": 1633 - }, - "Src": { - "IP": "52.136.47.198", - "Port": 31916 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692644-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 254 - ] - }, - { - "Addr": { - "IP": "74.5.247.79", - "Port": 18703 - }, - "Src": { - "IP": "200.247.68.128", - "Port": 55844 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692378-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 160 - ] - }, - { - "Addr": { - "IP": "17.220.231.87", - "Port": 59015 - }, - "Src": { - "IP": "54.207.49.4", - "Port": 17877 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692404-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "156.194.57.127", - "Port": 18944 - }, - "Src": { - "IP": "154.94.235.84", - "Port": 61610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692439-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 32 - ] - }, - { - "Addr": { - "IP": "137.57.172.158", - "Port": 32031 - }, - "Src": { - "IP": "144.160.225.126", - "Port": 43225 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692568-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 4 - ] - }, - { - "Addr": { - "IP": "101.220.101.200", - "Port": 26480 - }, - "Src": { - "IP": "130.225.42.1", - "Port": 2522 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692637-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 173 - ] - }, - { - "Addr": { - "IP": "136.233.185.164", - "Port": 34011 - }, - "Src": { - "IP": "112.127.216.43", - "Port": 55317 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692649-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - "Addr": { - "IP": "101.189.107.148", - "Port": 28671 - }, - "Src": { - "IP": "213.55.140.235", - "Port": 2547 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692178-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 72 - ] - }, - { - "Addr": { - "IP": "61.190.60.64", - "Port": 58467 - }, - "Src": { - "IP": "206.86.120.31", - "Port": 54422 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692358-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 191 - ] - }, - { - "Addr": { - "IP": "227.51.127.223", - "Port": 52754 - }, - "Src": { - "IP": "124.24.12.47", - "Port": 59878 - }, - 
"Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692393-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 122 - ] - }, - { - "Addr": { - "IP": "101.19.152.238", - "Port": 47491 - }, - "Src": { - "IP": "211.30.216.184", - "Port": 17610 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692135-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "182.198.35.238", - "Port": 15065 - }, - "Src": { - "IP": "239.67.104.149", - "Port": 43039 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692268-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 179 - ] - }, - { - "Addr": { - "IP": "233.12.68.51", - "Port": 47544 - }, - "Src": { - "IP": "203.224.119.48", - "Port": 23337 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692454-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 51 - ] - }, - { - "Addr": { - "IP": "181.30.35.80", - "Port": 500 - }, - "Src": { - "IP": "174.200.32.161", - "Port": 10174 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692503-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 69 - ] - }, - { - "Addr": { - "IP": "49.104.89.21", - "Port": 54774 - }, - "Src": { - "IP": "245.95.238.161", - "Port": 14339 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692654-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 120 - ] - }, - { - "Addr": { - "IP": "65.150.169.199", - "Port": 11589 - }, - "Src": { - "IP": "150.110.183.207", - "Port": 17694 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692041-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 182 - ] - }, - { - "Addr": { - "IP": "84.203.198.48", - "Port": 47122 - }, - "Src": { - "IP": "141.209.147.221", - "Port": 26085 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692056-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 74 - ] - }, - { - "Addr": { - "IP": "220.10.106.180", - "Port": 27439 - }, - "Src": { - "IP": "124.170.244.46", - "Port": 5249 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692125-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 166 - ] - }, - { - "Addr": { - "IP": "120.208.32.34", - "Port": 27224 - }, - "Src": { - "IP": "64.194.118.103", - "Port": 24388 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.69251-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 149 - ] - }, - { - "Addr": { - "IP": "245.182.67.231", - "Port": 58067 - }, - "Src": { - "IP": "62.108.238.220", - "Port": 41851 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692522-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 201 - ] - }, - { - "Addr": { - "IP": "50.81.160.105", - "Port": 8113 - }, - "Src": { - "IP": "129.187.68.121", - "Port": 58612 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692284-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 94 - ] - }, - { - "Addr": { - "IP": "101.116.47.155", - "Port": 20287 - }, - "Src": { - "IP": "94.34.167.170", - "Port": 41821 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692299-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 134 - ] - }, - { - "Addr": { - "IP": "159.253.213.86", 
- "Port": 5222 - }, - "Src": { - "IP": "124.47.162.125", - "Port": 45742 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692429-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 25 - ] - }, - { - "Addr": { - "IP": "124.72.81.213", - "Port": 35723 - }, - "Src": { - "IP": "201.65.186.55", - "Port": 26602 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692493-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 199 - ] - }, - { - "Addr": { - "IP": "77.216.197.130", - "Port": 49129 - }, - "Src": { - "IP": "245.160.14.27", - "Port": 38908 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692517-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 58 - ] - }, - { - "Addr": { - "IP": "175.46.154.0", - "Port": 15297 - }, - "Src": { - "IP": "6.10.7.13", - "Port": 9657 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692583-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 89 - ] - }, - { - "Addr": { - "IP": "176.71.131.235", - "Port": 14342 - }, - "Src": { - "IP": "1.36.215.198", - "Port": 21709 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692206-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 135 - ] - }, - { - "Addr": { - "IP": "34.211.134.186", - "Port": 31608 - }, - "Src": { - "IP": "187.87.12.183", - "Port": 32977 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692221-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 71 - ] - }, - { - "Addr": { - "IP": "238.63.227.107", - "Port": 49502 - }, - "Src": { - "IP": "185.51.127.143", - "Port": 22728 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692483-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 61 - ] - }, - { - "Addr": { - "IP": "160.65.76.45", - "Port": 27307 - }, - "Src": { - "IP": "170.175.198.16", - "Port": 44759 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692051-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 36 - ] - }, - { - "Addr": { - "IP": "152.22.79.90", - "Port": 25861 - }, - "Src": { - "IP": "216.183.31.190", - "Port": 9185 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692409-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 163 - ] - }, - { - "Addr": { - "IP": "200.2.175.37", - "Port": 57270 - }, - "Src": { - "IP": "108.20.254.94", - "Port": 32812 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692434-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 96 - ] - }, - { - "Addr": { - "IP": "111.16.237.10", - "Port": 45200 - }, - "Src": { - "IP": "215.82.246.115", - "Port": 42333 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692469-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 21 - ] - }, - { - "Addr": { - "IP": "166.217.195.221", - "Port": 4579 - }, - "Src": { - "IP": "148.153.131.183", - "Port": 13848 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692498-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 78 - ] - }, - { - "Addr": { - "IP": "1.226.156.147", - "Port": 61660 - }, - "Src": { - "IP": "169.138.16.69", - "Port": 23455 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692548-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - 
"Buckets": [ - 121 - ] - }, - { - "Addr": { - "IP": "108.209.27.58", - "Port": 59102 - }, - "Src": { - "IP": "140.27.139.90", - "Port": 52154 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692014-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 53 - ] - }, - { - "Addr": { - "IP": "221.244.202.95", - "Port": 5032 - }, - "Src": { - "IP": "230.152.141.80", - "Port": 19457 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692168-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 106 - ] - }, - { - "Addr": { - "IP": "55.87.1.138", - "Port": 39686 - }, - "Src": { - "IP": "55.22.167.132", - "Port": 35663 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692258-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 174 - ] - }, - { - "Addr": { - "IP": "209.53.148.74", - "Port": 18502 - }, - "Src": { - "IP": "195.108.121.25", - "Port": 16730 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692304-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 180 - ] - }, - { - "Addr": { - "IP": "21.66.206.236", - "Port": 10771 - }, - "Src": { - "IP": "236.195.50.16", - "Port": 30697 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692368-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 22 - ] - }, - { - "Addr": { - "IP": "190.87.236.91", - "Port": 58378 - }, - "Src": { - "IP": "72.224.218.34", - "Port": 44817 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692459-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 127 - ] - }, - { - "Addr": { - "IP": "197.172.79.170", - "Port": 24958 - }, - "Src": { - "IP": "71.22.4.12", - "Port": 28558 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692036-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 251 - ] - }, - { - "Addr": { - "IP": "160.176.234.94", - "Port": 47013 - }, - "Src": { - "IP": "212.172.24.59", - "Port": 29594 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692062-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 99 - ] - }, - { - "Addr": { - "IP": "170.206.180.18", - "Port": 26212 - }, - "Src": { - "IP": "228.135.62.18", - "Port": 26164 - }, - "Attempts": 0, - "LastAttempt": "2017-11-07T01:11:34.692234-07:00", - "LastSuccess": "0001-01-01T00:00:00Z", - "BucketType": 1, - "Buckets": [ - 34 - ] - } - ] -} diff --git a/test/fuzz/p2p/pex/testdata/cases/empty b/test/fuzz/p2p/pex/testdata/cases/empty deleted file mode 100644 index e69de29bb..000000000 diff --git a/test/test_cover.sh b/test/test_cover.sh index 17df139e6..cad6bec6d 100644 --- a/test/test_cover.sh +++ b/test/test_cover.sh @@ -6,7 +6,7 @@ set -e echo "mode: atomic" > coverage.txt for pkg in ${PKGS[@]}; do - go test -timeout 5m -race -coverprofile=profile.out -covermode=atomic "$pkg" + go test -timeout 5m -race -coverprofile=profile.out "$pkg" if [ -f profile.out ]; then tail -n +2 profile.out >> coverage.txt; rm profile.out diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index 4d333949a..96eaaaff0 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -12,8 +12,8 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/crypto/ed25519" + 
"github.com/tendermint/tendermint/internal/state" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" diff --git a/types/evidence.go b/types/evidence.go index 40ff85e5e..330850ea3 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -46,15 +46,20 @@ type DuplicateVoteEvidence struct { var _ Evidence = &DuplicateVoteEvidence{} // NewDuplicateVoteEvidence creates DuplicateVoteEvidence with right ordering given -// two conflicting votes. If one of the votes is nil, evidence returned is nil as well -func NewDuplicateVoteEvidence(vote1, vote2 *Vote, blockTime time.Time, valSet *ValidatorSet) *DuplicateVoteEvidence { +// two conflicting votes. If either of the votes is nil, the val set is nil or the voter is +// not in the val set, an error is returned +func NewDuplicateVoteEvidence(vote1, vote2 *Vote, blockTime time.Time, valSet *ValidatorSet, +) (*DuplicateVoteEvidence, error) { var voteA, voteB *Vote - if vote1 == nil || vote2 == nil || valSet == nil { - return nil + if vote1 == nil || vote2 == nil { + return nil, errors.New("missing vote") + } + if valSet == nil { + return nil, errors.New("missing validator set") } idx, val := valSet.GetByAddress(vote1.ValidatorAddress) if idx == -1 { - return nil + return nil, errors.New("validator not in validator set") } if strings.Compare(vote1.BlockID.Key(), vote2.BlockID.Key()) == -1 { @@ -70,7 +75,7 @@ func NewDuplicateVoteEvidence(vote1, vote2 *Vote, blockTime time.Time, valSet *V TotalVotingPower: valSet.TotalVotingPower(), ValidatorPower: val.VotingPower, Timestamp: blockTime, - } + }, nil } // ABCI returns the application relevant representation of the evidence @@ -92,7 +97,7 @@ func (dve *DuplicateVoteEvidence) Bytes() []byte { pbe := dve.ToProto() bz, err := pbe.Marshal() if err != nil { - panic(err) + panic("marshaling duplicate vote evidence to bytes: " + err.Error()) } return bz @@ -260,11 +265,11 @@ func (l *LightClientAttackEvidence) ABCI() []abci.Evidence { func (l *LightClientAttackEvidence) Bytes() []byte { pbe, err := l.ToProto() if err != nil { - panic(err) + panic("converting light client attack evidence to proto: " + err.Error()) } bz, err := pbe.Marshal() if err != nil { - panic(err) + panic("marshaling light client attack evidence to bytes: " + err.Error()) } return bz } @@ -684,7 +689,11 @@ func NewMockDuplicateVoteEvidenceWithValidator(height int64, time time.Time, vB := voteB.ToProto() _ = pv.SignVote(context.Background(), chainID, vB) voteB.Signature = vB.Signature - return NewDuplicateVoteEvidence(voteA, voteB, time, NewValidatorSet([]*Validator{val})) + ev, err := NewDuplicateVoteEvidence(voteA, voteB, time, NewValidatorSet([]*Validator{val})) + if err != nil { + panic("constructing mock duplicate vote evidence: " + err.Error()) + } + return ev } func makeMockVote(height int64, round, index int32, addr Address, diff --git a/types/evidence_test.go b/types/evidence_test.go index 9d54797e4..5110bcb1d 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -85,7 +85,8 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { vote1 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID, defaultVoteTime) vote2 := makeVote(t, val, chainID, math.MaxInt32, math.MaxInt64, math.MaxInt32, 0x02, blockID2, defaultVoteTime) valSet := NewValidatorSet([]*Validator{val.ExtractIntoValidator(10)}) - ev := NewDuplicateVoteEvidence(vote1, vote2, defaultVoteTime, valSet) 
diff --git a/pkg/mempool/errors.go b/types/mempool.go
similarity index 81%
rename from pkg/mempool/errors.go
rename to types/mempool.go
index e3a9a2217..fa0f8a208 100644
--- a/pkg/mempool/errors.go
+++ b/types/mempool.go
@@ -1,14 +1,16 @@
-package mempool
+package types
 
 import (
+    "crypto/sha256"
     "errors"
     "fmt"
 )
 
-var (
-    // ErrTxInCache is returned to the client if we saw tx earlier
-    ErrTxInCache = errors.New("tx already exists in cache")
-)
+// ErrTxInCache is returned to the client if we saw tx earlier
+var ErrTxInCache = errors.New("tx already exists in cache")
+
+// TxKey is the fixed length array key used as an index.
+type TxKey [sha256.Size]byte
 
 // ErrTxTooLarge defines an error when a transaction is too big to be sent in a
 // message to other peers.
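The rename moves ErrTxInCache into types, where callers outside the mempool package can match on it. A hedged sketch of the usual handling, with a hypothetical broadcast function standing in for whatever transport the caller actually uses:

package example

import (
    "errors"

    "github.com/tendermint/tendermint/types"
)

// submitTx is a hypothetical helper; only the error-handling pattern matters.
func submitTx(broadcast func(types.Tx) error, tx types.Tx) error {
    if err := broadcast(tx); err != nil {
        // A duplicate is usually benign: the tx is already pending.
        if errors.Is(err, types.ErrTxInCache) {
            return nil
        }
        return err
    }
    return nil
}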
diff --git a/types/netaddress.go b/types/netaddress.go
deleted file mode 100644
index bc074dca6..000000000
--- a/types/netaddress.go
+++ /dev/null
@@ -1,329 +0,0 @@
-// Modified for Tendermint
-// Originally Copyright (c) 2013-2014 Conformal Systems LLC.
-// https://github.com/conformal/btcd/blob/master/LICENSE
-
-package types
-
-import (
-    "errors"
-    "flag"
-    "fmt"
-    "net"
-    "strconv"
-    "strings"
-    "time"
-)
-
-// EmptyNetAddress defines the string representation of an empty NetAddress
-const EmptyNetAddress = "<nil>"
-
-// NetAddress defines information about a peer on the network
-// including its ID, IP address, and port.
-type NetAddress struct {
-    ID   NodeID `json:"id"`
-    IP   net.IP `json:"ip"`
-    Port uint16 `json:"port"`
-}
-
-// NewNetAddress returns a new NetAddress using the provided TCP
-// address. When testing, other net.Addr (except TCP) will result in
-// using 0.0.0.0:0. When normal run, other net.Addr (except TCP) will
-// panic. Panics if ID is invalid.
-// TODO: socks proxies?
-func NewNetAddress(id NodeID, addr net.Addr) *NetAddress {
-    tcpAddr, ok := addr.(*net.TCPAddr)
-    if !ok {
-        if flag.Lookup("test.v") == nil { // normal run
-            panic(fmt.Sprintf("Only TCPAddrs are supported. Got: %v", addr))
-        } else { // in testing
-            netAddr := NewNetAddressIPPort(net.IP("127.0.0.1"), 0)
-            netAddr.ID = id
-            return netAddr
-        }
-    }
-
-    if err := id.Validate(); err != nil {
-        panic(fmt.Sprintf("Invalid ID %v: %v (addr: %v)", id, err, addr))
-    }
-
-    ip := tcpAddr.IP
-    port := uint16(tcpAddr.Port)
-    na := NewNetAddressIPPort(ip, port)
-    na.ID = id
-    return na
-}
-
-// NewNetAddressIPPort returns a new NetAddress using the provided IP
-// and port number.
-func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress {
-    return &NetAddress{
-        IP:   ip,
-        Port: port,
-    }
-}
-
-// NewNetAddressString returns a new NetAddress using the provided address in
-// the form of "ID@IP:Port".
-// Also resolves the host if host is not an IP.
-// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup)
-func NewNetAddressString(addr string) (*NetAddress, error) {
-    addrWithoutProtocol := removeProtocolIfDefined(addr)
-    spl := strings.Split(addrWithoutProtocol, "@")
-    if len(spl) != 2 {
-        return nil, ErrNetAddressNoID{addr}
-    }
-
-    id, err := NewNodeID(spl[0])
-    if err != nil {
-        return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
-    }
-
-    if err := id.Validate(); err != nil {
-        return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
-    }
-
-    addrWithoutProtocol = spl[1]
-
-    // get host and port
-    host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
-    if err != nil {
-        return nil, ErrNetAddressInvalid{addrWithoutProtocol, err}
-    }
-    if len(host) == 0 {
-        return nil, ErrNetAddressInvalid{
-            addrWithoutProtocol,
-            errors.New("host is empty")}
-    }
-
-    ip := net.ParseIP(host)
-    if ip == nil {
-        ips, err := net.LookupIP(host)
-        if err != nil {
-            return nil, ErrNetAddressLookup{host, err}
-        }
-        ip = ips[0]
-    }
-
-    port, err := strconv.ParseUint(portStr, 10, 16)
-    if err != nil {
-        return nil, ErrNetAddressInvalid{portStr, err}
-    }
-
-    na := NewNetAddressIPPort(ip, uint16(port))
-    na.ID = id
-    return na, nil
-}
-
-// Equals reports whether na and other are the same addresses,
-// including their ID, IP, and Port.
-func (na *NetAddress) Equals(other interface{}) bool {
-    if o, ok := other.(*NetAddress); ok {
-        return na.String() == o.String()
-    }
-    return false
-}
-
-// Same returns true is na has the same non-empty ID or DialString as other.
-func (na *NetAddress) Same(other interface{}) bool {
-    if o, ok := other.(*NetAddress); ok {
-        if na.DialString() == o.DialString() {
-            return true
-        }
-        if na.ID != "" && na.ID == o.ID {
-            return true
-        }
-    }
-    return false
-}
-
-// String representation: <ID>@<IP>:<PORT>
-func (na *NetAddress) String() string {
-    if na == nil {
-        return EmptyNetAddress
-    }
-
-    addrStr := na.DialString()
-    if na.ID != "" {
-        addrStr = na.ID.AddressString(addrStr)
-    }
-
-    return addrStr
-}
-
-func (na *NetAddress) DialString() string {
-    if na == nil {
-        return "<nil>"
-    }
-    return net.JoinHostPort(
-        na.IP.String(),
-        strconv.FormatUint(uint64(na.Port), 10),
-    )
-}
-
-// Dial calls net.Dial on the address.
-func (na *NetAddress) Dial() (net.Conn, error) {
-    conn, err := net.Dial("tcp", na.DialString())
-    if err != nil {
-        return nil, err
-    }
-    return conn, nil
-}
-
-// DialTimeout calls net.DialTimeout on the address.
-func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
-    conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
-    if err != nil {
-        return nil, err
-    }
-    return conn, nil
-}
-
-// Routable returns true if the address is routable.
-func (na *NetAddress) Routable() bool {
-    if err := na.Valid(); err != nil {
-        return false
-    }
-    // TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
-    return !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
-        na.RFC4193() || na.RFC4843() || na.Local())
-}
-
-// For IPv4 these are either a 0 or all bits set address. For IPv6 a zero
-// address or one that matches the RFC3849 documentation address format.
-func (na *NetAddress) Valid() error {
-    if err := na.ID.Validate(); err != nil {
-        return fmt.Errorf("invalid ID: %w", err)
-    }
-
-    if na.IP == nil {
-        return errors.New("no IP")
-    }
-    if na.IP.IsUnspecified() || na.RFC3849() || na.IP.Equal(net.IPv4bcast) {
-        return errors.New("invalid IP")
-    }
-    return nil
-}
-
-// Local returns true if it is a local address.
-func (na *NetAddress) Local() bool {
-    return na.IP.IsLoopback() || zero4.Contains(na.IP)
-}
-
-// ReachabilityTo checks whenever o can be reached from na.
-func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
-    const (
-        Unreachable = 0
-        Default     = iota
-        Teredo
-        Ipv6Weak
-        Ipv4
-        Ipv6Strong
-    )
-    switch {
-    case !na.Routable():
-        return Unreachable
-    case na.RFC4380():
-        switch {
-        case !o.Routable():
-            return Default
-        case o.RFC4380():
-            return Teredo
-        case o.IP.To4() != nil:
-            return Ipv4
-        default: // ipv6
-            return Ipv6Weak
-        }
-    case na.IP.To4() != nil:
-        if o.Routable() && o.IP.To4() != nil {
-            return Ipv4
-        }
-        return Default
-    default: /* ipv6 */
-        var tunneled bool
-        // Is our v6 is tunneled?
-        if o.RFC3964() || o.RFC6052() || o.RFC6145() {
-            tunneled = true
-        }
-        switch {
-        case !o.Routable():
-            return Default
-        case o.RFC4380():
-            return Teredo
-        case o.IP.To4() != nil:
-            return Ipv4
-        case tunneled:
-            // only prioritize ipv6 if we aren't tunneling it.
-            return Ipv6Weak
-        }
-        return Ipv6Strong
-    }
-}
-
-// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12)
-// RFC3849: IPv6 Documentation address (2001:0DB8::/32)
-// RFC3927: IPv4 Autoconfig (169.254.0.0/16)
-// RFC3964: IPv6 6to4 (2002::/16)
-// RFC4193: IPv6 unique local (FC00::/7)
-// RFC4380: IPv6 Teredo tunneling (2001::/32)
-// RFC4843: IPv6 ORCHID: (2001:10::/28)
-// RFC4862: IPv6 Autoconfig (FE80::/64)
-// RFC6052: IPv6 well known prefix (64:FF9B::/96)
-// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96
-var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)}
-var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)}
-var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)}
-var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)}
-var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)}
-var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)}
-var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)}
-var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)}
-var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)}
-var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)}
-var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)}
-var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)}
-var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)}
-var (
-    // onionCatNet defines the IPv6 address block used to support Tor.
-    // bitcoind encodes a .onion address as a 16 byte number by decoding the
-    // address prior to the .onion (i.e. the key hash) base32 into a ten
-    // byte number. It then stores the first 6 bytes of the address as
-    // 0xfd, 0x87, 0xd8, 0x7e, 0xeb, 0x43.
-    //
-    // This is the same range used by OnionCat, which is part part of the
-    // RFC4193 unique local IPv6 range.
-    //
-    // In summary the format is:
-    // { magic 6 bytes, 10 bytes base32 decode of key hash }
-    onionCatNet = ipNet("fd87:d87e:eb43::", 48, 128)
-)
-
-func (na *NetAddress) RFC1918() bool {
-    return rfc1918_10.Contains(na.IP) ||
-        rfc1918_192.Contains(na.IP) ||
-        rfc1918_172.Contains(na.IP)
-}
-func (na *NetAddress) RFC3849() bool     { return rfc3849.Contains(na.IP) }
-func (na *NetAddress) RFC3927() bool     { return rfc3927.Contains(na.IP) }
-func (na *NetAddress) RFC3964() bool     { return rfc3964.Contains(na.IP) }
-func (na *NetAddress) RFC4193() bool     { return rfc4193.Contains(na.IP) }
-func (na *NetAddress) RFC4380() bool     { return rfc4380.Contains(na.IP) }
-func (na *NetAddress) RFC4843() bool     { return rfc4843.Contains(na.IP) }
-func (na *NetAddress) RFC4862() bool     { return rfc4862.Contains(na.IP) }
-func (na *NetAddress) RFC6052() bool     { return rfc6052.Contains(na.IP) }
-func (na *NetAddress) RFC6145() bool     { return rfc6145.Contains(na.IP) }
-func (na *NetAddress) OnionCatTor() bool { return onionCatNet.Contains(na.IP) }
-
-func removeProtocolIfDefined(addr string) string {
-    if strings.Contains(addr, "://") {
-        return strings.Split(addr, "://")[1]
-    }
-    return addr
-
-}
-
-// ipNet returns a net.IPNet struct given the passed IP address string, number
-// of one bits to include at the start of the mask, and the total number of bits
-// for the mask.
-func ipNet(ip string, ones, bits int) net.IPNet {
-    return net.IPNet{IP: net.ParseIP(ip), Mask: net.CIDRMask(ones, bits)}
-}
"udp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - {"malformed tcp input", "tcp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"malformed udp input", "udp//deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - - // {"127.0.0:8080", false}, - {"invalid host", "notahost", "", false}, - {"invalid port", "127.0.0.1:notapath", "", false}, - {"invalid host w/ port", "notahost:8080", "", false}, - {"just a port", "8082", "", false}, - {"non-existent port", "127.0.0:8080000", "", false}, - - {"too short nodeId", "deadbeef@127.0.0.1:8080", "", false}, - {"too short, not hex nodeId", "this-isnot-hex@127.0.0.1:8080", "", false}, - {"not hex nodeId", "xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - - {"too short nodeId w/tcp", "tcp://deadbeef@127.0.0.1:8080", "", false}, - {"too short notHex nodeId w/tcp", "tcp://this-isnot-hex@127.0.0.1:8080", "", false}, - {"notHex nodeId w/tcp", "tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - { - "correct nodeId w/tcp", - "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - true, - }, - - {"no node id", "tcp://@127.0.0.1:8080", "", false}, - {"no node id or IP", "tcp://@", "", false}, - {"tcp no host, w/ port", "tcp://:26656", "", false}, - {"empty", "", "", false}, - {"node id delimiter 1", "@", "", false}, - {"node id delimiter 2", " @", "", false}, - {"node id delimiter 3", " @ ", "", false}, - } - - for _, tc := range testCases { - tc := tc - t.Run(tc.name, func(t *testing.T) { - addr, err := NewNetAddressString(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) - } - } else { - assert.NotNil(t, err, tc.addr) - } - }) - } -} - -func TestNewNetAddressIPPort(t *testing.T) { - addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080) - assert.Equal(t, "127.0.0.1:8080", addr.String()) -} - -func TestNetAddressProperties(t *testing.T) { - // TODO add more test cases - testCases := []struct { - addr string - valid bool - local bool - routable bool - }{ - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true, true, false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", true, false, true}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - require.Nil(t, err) - - err = addr.Valid() - if tc.valid { - assert.NoError(t, err) - } else { - assert.Error(t, err) - } - assert.Equal(t, tc.local, addr.Local()) - assert.Equal(t, tc.routable, addr.Routable()) - } -} - -func TestNetAddressReachabilityTo(t *testing.T) { - // TODO add more test cases - testCases := []struct { - addr string - other string - reachability int - }{ - { - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8081", - 0, - }, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@ya.ru:80", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", 1}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - require.Nil(t, err) - - other, err := NewNetAddressString(tc.other) - require.Nil(t, err) - - assert.Equal(t, tc.reachability, addr.ReachabilityTo(other)) - } -} diff --git a/types/node_id.go b/types/node_id.go index c260aa117..a5db40159 100644 --- a/types/node_id.go +++ b/types/node_id.go @@ -31,8 +31,7 @@ func NewNodeID(nodeID string) (NodeID, error) { 
diff --git a/types/node_id.go b/types/node_id.go
index c260aa117..a5db40159 100644
--- a/types/node_id.go
+++ b/types/node_id.go
@@ -31,8 +31,7 @@ func NewNodeID(nodeID string) (NodeID, error) {
 // IDAddressString returns id@hostPort. It strips the leading
 // protocol from protocolHostPort if it exists.
 func (id NodeID) AddressString(protocolHostPort string) string {
-    hostPort := removeProtocolIfDefined(protocolHostPort)
-    return fmt.Sprintf("%s@%s", id, hostPort)
+    return fmt.Sprintf("%s@%s", id, removeProtocolIfDefined(protocolHostPort))
 }
 
 // NodeIDFromPubKey creates a node ID from a given PubKey address.
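The AddressString refactor is behavior-preserving; if it helps review, this tiny illustrative check shows what the method produces, including the protocol stripping (the node ID value is a placeholder):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/types"
)

func main() {
    id := types.NodeID("deadbeefdeadbeefdeadbeefdeadbeefdeadbeef") // illustrative ID
    // The "tcp://" prefix, if present, is stripped before joining:
    fmt.Println(id.AddressString("tcp://127.0.0.1:26656"))
    // deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656
}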
diff --git a/types/node_info.go b/types/node_info.go
index 9dbdbf70d..902ca759b 100644
--- a/types/node_info.go
+++ b/types/node_info.go
@@ -3,6 +3,9 @@ package types
 import (
     "errors"
     "fmt"
+    "net"
+    "strconv"
+    "strings"
 
     "github.com/tendermint/tendermint/libs/bytes"
     tmstrings "github.com/tendermint/tendermint/libs/strings"
@@ -74,17 +77,10 @@ func (info NodeInfo) ID() NodeID {
 // url-encoding), and we just need to be careful with how we handle that in our
 // clients. (e.g. off by default).
 func (info NodeInfo) Validate() error {
-
-    // ID is already validated.
-
-    // Validate ListenAddr.
-    _, err := NewNetAddressString(info.ID().AddressString(info.ListenAddr))
-    if err != nil {
+    if _, _, err := ParseAddressString(info.ID().AddressString(info.ListenAddr)); err != nil {
         return err
     }
 
-    // Network is validated in CompatibleWith.
-
     // Validate Version
     if len(info.Version) > 0 &&
         (!tmstrings.IsASCIIText(info.Version) || tmstrings.ASCIITrim(info.Version) == "") {
@@ -163,15 +159,6 @@ OUTER_LOOP:
     return nil
 }
 
-// NetAddress returns a NetAddress derived from the NodeInfo -
-// it includes the authenticated peer ID and the self-reported
-// ListenAddr. Note that the ListenAddr is not authenticated and
-// may not match that address actually dialed if its an outbound peer.
-func (info NodeInfo) NetAddress() (*NetAddress, error) {
-    idAddr := info.ID().AddressString(info.ListenAddr)
-    return NewNetAddressString(idAddr)
-}
-
 // AddChannel is used by the router when a channel is opened to add it to the node info
 func (info *NodeInfo) AddChannel(channel uint16) {
     // check that the channel doesn't already exist
@@ -244,3 +231,58 @@ func NodeInfoFromProto(pb *tmp2p.NodeInfo) (NodeInfo, error) {
 
     return dni, nil
 }
+
+// ParseAddressString reads an address string, and returns the IP
+// address and port information, returning an error for any validation
+// errors.
+func ParseAddressString(addr string) (net.IP, uint16, error) {
+    addrWithoutProtocol := removeProtocolIfDefined(addr)
+    spl := strings.Split(addrWithoutProtocol, "@")
+    if len(spl) != 2 {
+        return nil, 0, errors.New("invalid address")
+    }
+
+    id, err := NewNodeID(spl[0])
+    if err != nil {
+        return nil, 0, err
+    }
+
+    if err := id.Validate(); err != nil {
+        return nil, 0, err
+    }
+
+    addrWithoutProtocol = spl[1]
+
+    // get host and port
+    host, portStr, err := net.SplitHostPort(addrWithoutProtocol)
+    if err != nil {
+        return nil, 0, err
+    }
+    if len(host) == 0 {
+        return nil, 0, err
+    }
+
+    ip := net.ParseIP(host)
+    if ip == nil {
+        ips, err := net.LookupIP(host)
+        if err != nil {
+            return nil, 0, err
+        }
+        ip = ips[0]
+    }
+
+    port, err := strconv.ParseUint(portStr, 10, 16)
+    if err != nil {
+        return nil, 0, err
+    }
+
+    return ip, uint16(port), nil
+}
+
+func removeProtocolIfDefined(addr string) string {
+    if strings.Contains(addr, "://") {
+        return strings.Split(addr, "://")[1]
+    }
+    return addr
+
+}
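ParseAddressString replaces NewNetAddressString for the validation path above. A small usage sketch under the same "ID@host:port" convention (the node ID and endpoint are illustrative):

package main

import (
    "fmt"

    "github.com/tendermint/tendermint/types"
)

func main() {
    // Accepts the same form NewNetAddressString used to take, with an
    // optional protocol prefix, but returns the raw IP and port instead
    // of a *NetAddress.
    ip, port, err := types.ParseAddressString(
        "tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:26656")
    if err != nil {
        panic(err)
    }
    fmt.Println(ip, port) // 127.0.0.1 26656
}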
{"node id delimiter 3", " @ ", "", false}, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + addr, port, err := ParseAddressString(tc.addr) + if tc.correct { + require.Nil(t, err, tc.addr) + assert.Contains(t, tc.expected, addr.String()) + assert.Contains(t, tc.expected, fmt.Sprint(port)) + } else { + assert.Error(t, err, "%v", tc.addr) + } + }) + } +} diff --git a/types/protobuf.go b/types/protobuf.go index 7cd224665..f82965fbf 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -2,7 +2,7 @@ package types import ( abci "github.com/tendermint/tendermint/abci/types" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" ) //------------------------------------------------------- @@ -22,7 +22,7 @@ func (tm2pb) Validator(val *Validator) abci.Validator { // XXX: panics on unknown pubkey type func (tm2pb) ValidatorUpdate(val *Validator) abci.ValidatorUpdate { - pk, err := cryptoenc.PubKeyToProto(val.PubKey) + pk, err := encoding.PubKeyToProto(val.PubKey) if err != nil { panic(err) } @@ -52,7 +52,7 @@ type pb2tm struct{} func (pb2tm) ValidatorUpdates(vals []abci.ValidatorUpdate) ([]*Validator, error) { tmVals := make([]*Validator, len(vals)) for i, v := range vals { - pub, err := cryptoenc.PubKeyFromProto(v.PubKey) + pub, err := encoding.PubKeyFromProto(v.PubKey) if err != nil { return nil, err } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index b6900f40c..a33d031e2 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -9,7 +9,7 @@ import ( abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" - cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/crypto/encoding" ) func TestABCIPubKey(t *testing.T) { @@ -19,9 +19,9 @@ func TestABCIPubKey(t *testing.T) { } func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) error { - abciPubKey, err := cryptoenc.PubKeyToProto(pk) + abciPubKey, err := encoding.PubKeyToProto(pk) require.NoError(t, err) - pk2, err := cryptoenc.PubKeyFromProto(abciPubKey) + pk2, err := encoding.PubKeyFromProto(abciPubKey) require.NoError(t, err) require.Equal(t, pk, pk2) return nil diff --git a/types/tx.go b/types/tx.go index 92df92f13..19ee41dac 100644 --- a/types/tx.go +++ b/types/tx.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "crypto/sha256" "errors" "fmt" @@ -16,15 +17,14 @@ import ( // Might we want types here ? type Tx []byte +// Key produces a fixed-length key for use in indexing. +func (tx Tx) Key() TxKey { return sha256.Sum256(tx) } + // Hash computes the TMHASH hash of the wire encoded transaction. -func (tx Tx) Hash() []byte { - return tmhash.Sum(tx) -} +func (tx Tx) Hash() []byte { return tmhash.Sum(tx) } // String returns the hex-encoded transaction as a string. -func (tx Tx) String() string { - return fmt.Sprintf("Tx{%X}", []byte(tx)) -} +func (tx Tx) String() string { return fmt.Sprintf("Tx{%X}", []byte(tx)) } // Txs is a slice of Tx. 
diff --git a/types/validation.go b/types/validation.go
index 1bf0265db..e8f53f2a0 100644
--- a/types/validation.go
+++ b/types/validation.go
@@ -162,9 +162,9 @@ func verifyCommitBatch(
     var (
         val                *Validator
         valIdx             int32
-        seenVals                 = make(map[int32]int, len(commit.Signatures))
-        batchSigIdxs             = make([]int, 0, len(commit.Signatures))
-        talliedVotingPower int64 = 0
+        talliedVotingPower int64
+        seenVals           = make(map[int32]int, len(commit.Signatures))
+        batchSigIdxs       = make([]int, 0, len(commit.Signatures))
     )
     // attempt to create a batch verifier
     bv, ok := batch.CreateBatchVerifier(vals.GetProposer().PubKey)
@@ -275,9 +275,9 @@ func verifyCommitSingle(
     var (
         val                *Validator
         valIdx             int32
-        seenVals                 = make(map[int32]int, len(commit.Signatures))
-        talliedVotingPower int64 = 0
+        talliedVotingPower int64
         voteSignBytes      []byte
+        seenVals           = make(map[int32]int, len(commit.Signatures))
     )
     for idx, commitSig := range commit.Signatures {
         if ignoreSig(commitSig) {
diff --git a/types/validator.go b/types/validator.go
index fb3fa2d76..ded8156bf 100644
--- a/types/validator.go
+++ b/types/validator.go
@@ -7,7 +7,7 @@ import (
     "strings"
 
     "github.com/tendermint/tendermint/crypto"
-    ce "github.com/tendermint/tendermint/crypto/encoding"
+    "github.com/tendermint/tendermint/crypto/encoding"
     tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 )
 
@@ -114,7 +114,7 @@ func ValidatorListString(vals []*Validator) string {
 // as its redundant with the pubkey. This also excludes ProposerPriority
 // which changes every round.
 func (v *Validator) Bytes() []byte {
-    pk, err := ce.PubKeyToProto(v.PubKey)
+    pk, err := encoding.PubKeyToProto(v.PubKey)
     if err != nil {
         panic(err)
     }
@@ -137,7 +137,7 @@ func (v *Validator) ToProto() (*tmproto.Validator, error) {
         return nil, errors.New("nil validator")
     }
 
-    pk, err := ce.PubKeyToProto(v.PubKey)
+    pk, err := encoding.PubKeyToProto(v.PubKey)
     if err != nil {
         return nil, err
     }
@@ -159,7 +159,7 @@ func ValidatorFromProto(vp *tmproto.Validator) (*Validator, error) {
         return nil, errors.New("nil validator")
     }
 
-    pk, err := ce.PubKeyFromProto(vp.PubKey)
+    pk, err := encoding.PubKeyFromProto(vp.PubKey)
     if err != nil {
         return nil, err
     }
diff --git a/types/validator_set_test.go b/types/validator_set_test.go
index a69121344..87008bb1c 100644
--- a/types/validator_set_test.go
+++ b/types/validator_set_test.go
@@ -508,7 +508,7 @@ func TestAveragingInIncrementProposerPriority(t *testing.T) {
             {Address: []byte("c"), ProposerPriority: 1}}},
             // this should average twice but the average should be 0 after the first iteration
             // (voting power is 0 -> no changes)
-            11, 1 / 3},
+            11, 0},
         2: {ValidatorSet{
             Validators: []*Validator{
                 {Address: []byte("a"), ProposerPriority: 100},
diff --git a/version/version.go b/version/version.go
index 3fb08652e..e42952f77 100644
--- a/version/version.go
+++ b/version/version.go
@@ -10,7 +10,7 @@ const (
 
     // TMVersionDefault is the used as the fallback version of Tendermint Core
     // when not using git describe. It is formatted with semantic versioning.
-    TMVersionDefault = "0.34.11"
+    TMVersionDefault = "0.35.0-unreleased"
 
     // ABCISemVer is the semantic version of the ABCI library
     ABCISemVer = "0.17.0"
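One detail worth flagging in the validator_set_test.go hunk: "1 / 3" is an untyped integer constant expression in Go and already evaluated to 0, so the new literal changes no behavior, only the stated intent. A one-line check to confirm:

package main

import "fmt"

func main() {
    // Untyped constant division truncates: 1 / 3 == 0, so the old test
    // expectation was already 0; the new literal just says so plainly.
    fmt.Println(1/3 == 0) // true
}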